# coding=utf-8
# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pretrain utilities."""

from datetime import datetime
import math
import sys
import os
import time
import json

import numpy as np

# The earliest we can measure the start time.
# TODO: Workaround as not supporting float64
_TRAIN_START_TIME = time.time()

import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP

from megatron import get_args
from megatron import get_timers
from megatron import get_tensorboard_writer
from megatron import get_current_global_batch_size
from megatron import get_num_microbatches, get_num_eval_microbatches
from megatron import is_last_rank
from megatron import update_num_microbatches
from megatron import mpu
from megatron import print_rank_0
from megatron import print_rank_last
from megatron.checkpointing import load_checkpoint
from megatron.checkpointing import save_checkpoint
from megatron.model import Float16Module
from megatron.mpu.data import reset_cached_broadcast_sizes
from megatron.optimizer import get_megatron_optimizer
from megatron.initialize import initialize_megatron
from megatron.initialize import write_args_to_tensorboard
from megatron.learning_rates import AnnealingLR
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.utils import check_adlr_autoresume_termination
from megatron.utils import unwrap_model, found_kill_switch
from megatron.data.data_samplers import build_pretraining_data_loader
from megatron.data.gpt_dataset import build_train_valid_test_datasets
from megatron.utils import calc_params_l2_norm
from megatron.schedules import forward_backward_no_pipelining
from megatron.schedules import forward_backward_pipelining_without_interleaving
from megatron.schedules import forward_backward_pipelining_with_interleaving
from megatron.utils import report_memory, throughput_calculator, checkpoint_throughput_calculator
from megatron.global_vars import get_current_device, get_current_device_index
from megatron.profiler import setup_profiler, trigger, on_step_begin, on_step_end

import deepspeed
from deepspeed.compression.compress import init_compression, redundancy_clean
from contextlib import nullcontext
from megatron.mpu.layers import ColumnParallelLinear, RowParallelLinear

import habana_frameworks.torch.core as htcore


def print_datetime(string):
    """Note that this call will sync across all ranks."""
    torch.distributed.barrier()
    time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print_rank_0('[' + string + '] datetime: {} '.format(time_str))
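

# Process-wide cache for the FP8 recipe used by Habana's transformer_engine.
# Reading the fields set below (rather than any external documentation):
# E5M2 format with delayed scaling, where amax is re-measured every
# `hpu_fp8_measure_interval` steps, only the most recent measurement feeds
# the scale (history length 1), and no cross-rank amax reduction is done.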
FP8_RECIPE = None


def get_hpu_fp8_recipe():
    from habana_frameworks.torch.hpex.experimental.transformer_engine import recipe
    global FP8_RECIPE
    if FP8_RECIPE is None:
        fp8_format = recipe.Format.E5M2
        fp8_margin = 0
        fp8_interval = get_args().hpu_fp8_measure_interval
        FP8_RECIPE = recipe.DelayedScaling(
            margin=fp8_margin,
            interval=fp8_interval,
            fp8_format=fp8_format,
            amax_history_len=1,
            amax_compute_algo="most_recent",
            reduce_amax=False,
        )
    return FP8_RECIPE


def pretrain(train_valid_test_dataset_provider,
             model_provider,
             forward_step_func,
             extra_args_provider=None,
             args_defaults={}):
    """Main training program.

    This function will run the following in the order provided:
        1) initialize Megatron.
        2) setup model, optimizer and lr schedule using the model_provider.
        3) call train_val_test_data_provider to get train/val/test datasets.
        4) train the model using the forward_step_func.

    Arguments:
        train_valid_test_dataset_provider: a function that takes the size of
            train/valid/test dataset and returns `train, valid, test` datasets.
        model_provider: a function that returns a vanilla version of the
            model. By vanilla we mean a simple model on cpu with no fp16 or ddp.
        forward_step_func: a function that takes a `data iterator` and `model`,
            and returns a `loss` scalar with a dictionary with key:values being
            the info we would like to monitor during training, for example
            `lm-loss: value`. We also require that this function add
            `batch generator` to the timers class.
        extra_args_provider: a function that takes a parser and adds arguments
            to it. It is used for programs to add their own arguments.
        args_defaults: a dictionary from argument-name to argument-value. It
            is used to set already-parsed arguments.
    """
    # Initialize and get arguments, timers, and Tensorboard writer.
    _, mllogger = initialize_megatron(extra_args_provider=extra_args_provider,
                                      args_defaults=args_defaults)
    args = get_args()

    if found_kill_switch():
        print_datetime(f"Detected kill switch at {args.kill_switch_path}. Exiting")
        sys.exit()

    # Adjust the startup time so it reflects the largest value.
    # This will be closer to what scheduler will see (outside of
    # image ... launches.
    global _TRAIN_START_TIME
    # TODO: Make it back torch.DoubleTensor once supporting float64
    start_time_tensor = torch.FloatTensor([_TRAIN_START_TIME]).to(get_current_device())
    torch.distributed.all_reduce(start_time_tensor,
                                 op=torch.distributed.ReduceOp.MIN)
    _TRAIN_START_TIME = start_time_tensor.item()
    print_rank_0('time to initialize megatron (seconds): {:.3f}'.format(
        time.time() - _TRAIN_START_TIME))
    print_datetime('after megatron is initialized')

    timers = get_timers()

    if args.deepspeed:
        args.deepspeed_configuration = json.load(
            open(args.deepspeed_config, 'r', encoding='utf-8'))
        if "curriculum_learning" in args.deepspeed_configuration and \
                "enabled" in args.deepspeed_configuration["curriculum_learning"]:
            args.curriculum_learning = args.deepspeed_configuration[
                "curriculum_learning"]["enabled"]
        if args.curriculum_learning and not args.no_pipeline_parallel:
            from deepspeed.runtime.data_pipeline.curriculum_scheduler \
                import CurriculumScheduler
            args.curriculum_scheduler = CurriculumScheduler(
                args.deepspeed_configuration["curriculum_learning"])
        if "compression_training" in args.deepspeed_configuration:
            args.compression_training = True
        if args.universal_checkpoint:
            args.deepspeed_configuration["checkpoint"] = {"load_universal": True}
        # Clear deepspeed_config to force deepspeed to take config from
        # args.deepspeed_configuration at initialize().
        args.deepspeed_config = None

    mllogger.event(key=mllogger.constants.SUBMISSION_ORG, value='Habana')
    mllogger.event(key=mllogger.constants.SUBMISSION_PLATFORM,
                   value=f'gaudi-{torch.distributed.get_world_size()}')
    mllogger.event(key=mllogger.constants.SUBMISSION_STATUS, value='onprem')
    mllogger.event(key=mllogger.constants.SUBMISSION_DIVISION, value='closed')
    mllogger.event(key=mllogger.constants.SUBMISSION_BENCHMARK, value='gpt3')
    mllogger.event(key=mllogger.constants.SEED, value=args.seed, sync=False)
    mllogger.event(key=mllogger.constants.CACHE_CLEAR)

    if args.optimizer in ['adam', 'adamw', 'fusedadamw']:
        opt_name = mllogger.constants.ADAM
    elif args.optimizer == 'sgd':
        opt_name = mllogger.constants.SGD
    else:
        raise Exception('Unknown optimizer {}.'.format(args.optimizer))
    mllogger.event(key="opt_name", value=opt_name, sync=False)
    mllogger.event(key="opt_adam_beta_1", value=args.adam_beta1, sync=False)
    mllogger.event(key="opt_adam_beta_2", value=args.adam_beta2, sync=False)
    mllogger.event(key="opt_adam_epsilon", value=args.adam_eps, sync=False)
    mllogger.event(key="opt_weight_decay", value=args.weight_decay, sync=False)
    mllogger.event(key=mllogger.constants.OPT_BASE_LR, value=args.lr, sync=False)
    mllogger.event(key="opt_end_learning_rate", value=args.min_lr, sync=False)
    if args.lr_decay_samples is not None:
        mllogger.event(key="opt_learning_rate_decay_steps",
                       value=math.ceil(args.lr_decay_samples / args.global_batch_size), sync=False)
    mllogger.event(key="opt_learning_rate_warmup_steps",
                   value=math.ceil(args.lr_warmup_samples / args.global_batch_size), sync=False)
    mllogger.event(key="opt_learning_rate_decay_schedule",
                   value="cosine with linear warmup", sync=False)
    mllogger.event(key="opt_gradient_clip_norm", value=args.clip_grad, sync=False)
    mllogger.event(key="init_checkpoint_step",
                   value=math.ceil(args.ext_lr_steps / args.global_batch_size), sync=False)
    mllogger.event(key=mllogger.constants.GLOBAL_BATCH_SIZE,
                   value=args.global_batch_size, sync=False)
    mllogger.event(key=mllogger.constants.GRADIENT_ACCUMULATION_STEPS,
                   value=get_num_microbatches(), sync=False, unique=True)
    mllogger.event(key="max_sequence_length", value=args.seq_length, sync=False)
    mllogger.event(key=mllogger.constants.EVAL_SAMPLES, value=11590004, sync=False)
    mllogger.event(key="num_layers", value=args.num_layers, sync=False)
    mllogger.event(key="num_heads", value=args.num_attention_heads, sync=False)
    mllogger.event(key="hidden_size", value=args.hidden_size, sync=False)
    mllogger.event(key="ffn_hidden_size", value=args.ffn_hidden_size, sync=False)
    mllogger.event(key="hidden_dropout", value=args.hidden_dropout, sync=False)
    mllogger.event(key="attention_dropout", value=args.attention_dropout, sync=False)
    mllogger.event(key="layernorm_epsilon", value=args.layernorm_epsilon, sync=False)
    mllogger.event(key="tokenizer", value="SPM", sync=False)
    mllogger.event(key="dataset", value="C4", sync=False)

    # Model, optimizer, and learning rate.
    teacher_model = None
    if args.mos or args.kd:  # Set up teacher model.
        teacher_model = setup_teacher_model(args, model_provider)
    timers('model-and-optimizer-setup').start()
    model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider, teacher=False)
    timers('model-and-optimizer-setup').stop()
    print_datetime('after model, optimizer, and learning rate '
                   'scheduler are built')

    iteration = args.iteration
    if args.device_warmup:
        assert args.warmup_dataset_path is not None, '--warmup-dataset-path not provided'
        warmup_train_iterator, warmup_valid_iterator = build_warmup_iterators()
        prefix = 'Warmup'
        warmup(args, forward_step_func, warmup_train_iterator, warmup_valid_iterator,
               model, optimizer, lr_scheduler, teacher_model, prefix, iteration, mllogger)
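    # log_init_stop_run_start() emits MLPerf INIT_STOP followed by RUN_START:
    # initialization and the optional device warmup above are excluded from
    # the benchmark clock; everything after this call counts toward the
    # measured run time.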
    mllogger.log_init_stop_run_start()

    # Data stuff.
    timers('train/valid/test-data-iterators-setup').start()
    if args.virtual_pipeline_model_parallel_size is not None:
        all_data_iterators = [
            build_train_valid_test_data_iterators(train_valid_test_dataset_provider)
            for _ in range(len(model))
        ]
        train_data_iterator = [data_iterators[0] for data_iterators in all_data_iterators]
        valid_data_iterator = [data_iterators[1] for data_iterators in all_data_iterators]
        test_data_iterator = [data_iterators[2] for data_iterators in all_data_iterators]
    else:
        train_data_iterator, valid_data_iterator, test_data_iterator \
            = build_train_valid_test_data_iterators(
                train_valid_test_dataset_provider)
    timers('train/valid/test-data-iterators-setup').stop()
    print_datetime('after dataloaders are built')

    # Print setup timing.
    print_rank_0('done with setup ...')
    timers.log(['model-and-optimizer-setup', 'train/valid/test-data-iterators-setup'])

    iteration = args.iteration
    mllogger.start(key=mllogger.constants.BLOCK_START,
                   metadata={'first_epoch_num': 0,
                             'epoch_count': args.eval_interval * args.global_batch_size * args.seq_length},
                   sync=False)
    mllogger.start(key=mllogger.constants.EPOCH_START,
                   metadata={'epoch_num': 0},
                   sync=False)

    if args.do_valid and args.do_pretrain_validation:
        prefix = 'evaluation on val data for the initial checkpoint weights'
        evaluate_and_print_results(prefix, forward_step_func,
                                   valid_data_iterator, model,
                                   iteration, False, mllogger=mllogger)

    if args.do_train and args.train_iters > 0:
        print_rank_0('training ...')
        iteration = train(forward_step_func,
                          model, optimizer, lr_scheduler,
                          train_data_iterator, valid_data_iterator,
                          teacher_model=teacher_model, mllogger=mllogger)
        training_prefix = 'the end of training'
        print_datetime('after training is done')
    else:
        training_prefix = 'skipping training'
        print_rank_0('skipping training ...')

    if args.do_valid:
        prefix = ' '.join([training_prefix, 'for val data'])
        evaluate_and_print_results(prefix, forward_step_func,
                                   valid_data_iterator, model,
                                   iteration, False, mllogger=mllogger)

    # Clean the model and do evaluation again.
    if args.compression_training:
        model = [redundancy_clean(model[0], args.deepspeed_config, mpu)]
        if args.do_valid:
            prefix = ' '.join([training_prefix,
                               'and after model cleaning for val data'])
            evaluate_and_print_results(prefix, forward_step_func,
                                       valid_data_iterator, model,
                                       iteration, False, mllogger=mllogger)

    if args.save and (iteration != args.iteration or args.universal_checkpoint):
        save_checkpoint(iteration, model, optimizer, lr_scheduler)

    if args.do_test:
        # Run on test data.
        prefix = ' '.join([training_prefix, 'for test data'])
        evaluate_and_print_results(prefix, forward_step_func,
                                   test_data_iterator, model,
                                   0, True, mllogger=mllogger)

    mllogger.event(key=mllogger.constants.TRAIN_SAMPLES,
                   value=args.consumed_train_samples - args.ext_lr_steps,
                   sync=False)
    mllogger.end(key=mllogger.constants.BLOCK_STOP,
                 metadata={'first_epoch_num': 0},
                 sync=False)
    mllogger.end(key=mllogger.constants.EPOCH_STOP,
                 metadata={'epoch_num': (args.consumed_train_samples - args.ext_lr_steps)
                           * args.seq_length}, sync=False)
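    # Reaching this point means the target eval loss was never hit: the
    # success path lives inside train(), which logs RUN_STOP with status
    # 'success' and calls sys.exit() there, so the run is reported as failed
    # here per MLPerf semantics.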
    status = 'fail'
    mllogger.log_run_stop(status)


def update_train_iters(args):

    # For iteration-based training, we don't need to do anything.
    if args.train_iters:
        return

    # Constant batch size with sample-based training.
    if args.rampup_batch_size is None:
        args.train_iters = args.train_samples // args.global_batch_size
    else:
        # Sample-based training with rampup batch size.
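        # Count one iteration per (growing) global batch until
        # rampup_batch_size[2] samples are consumed, then count the
        # constant-batch remainder. Hypothetical illustration (numbers
        # invented for this comment): with --rampup-batch-size 32 32 1920
        # and a final global batch size of 128, the loop steps through the
        # batch sizes that update_num_microbatches() publishes (32, 64, 96,
        # ...) and the leftover samples are counted at 128 per iteration.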
        iterations = 0
        consumed_samples = 0
        # Rampup phase.
        while consumed_samples <= int(args.rampup_batch_size[2]):
            update_num_microbatches(consumed_samples, consistency_check=False)
            consumed_samples += get_current_global_batch_size()
            iterations += 1
        # Reset.
        update_num_microbatches(0, consistency_check=False)
        # Constant phase.
        # Note that we throw away any partial last batch.
        iterations += (args.train_samples - consumed_samples) // \
            args.global_batch_size
        args.train_iters = iterations

    print_rank_0('setting training iterations to {}'.format(args.train_iters))


def setup_teacher_model(args, model_provider):
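    # Knowledge-distillation setup. The teacher shares this process's args
    # namespace, so the student's shape/checkpoint fields are stashed, the
    # *_teacher variants are swapped in while the teacher is built and
    # loaded, and the student values are restored before returning.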
    print_rank_0('***>>>>> Student model checkpoint iteration:{}'.format(args.iteration))
    iteration_student = args.iteration
    num_layers_student = args.num_layers
    num_experts_student = args.num_experts
    hidden_size_student = args.hidden_size
    num_attention_heads_student = args.num_attention_heads
    load_student = args.load

    print_rank_0('***>>>>> Setting up the teacher model')
    args.num_layers = args.num_layers_teacher
    args.num_experts = args.num_experts_teacher
    args.hidden_size = args.hidden_size_teacher
    args.num_attention_heads = args.num_attention_heads_teacher
    args.load = args.load_teacher
    teacher_model, _, _ = load_model_weights_only(model_provider)
    print_rank_0('***>>>>> Teacher model:{}'.format(teacher_model))

    args.num_layers = num_layers_student
    args.num_experts = num_experts_student
    args.hidden_size = hidden_size_student
    args.num_attention_heads = num_attention_heads_student
    args.load = load_student
    args.iteration = iteration_student
    return teacher_model


def get_model(model_provider_func):
    """Build the model."""
    args = get_args()

    # Build model.
    if mpu.get_pipeline_model_parallel_world_size() > 1 and \
            args.virtual_pipeline_model_parallel_size is not None:
        model = []
        for i in range(args.virtual_pipeline_model_parallel_size):
            mpu.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            pre_process = mpu.is_pipeline_first_stage()
            post_process = mpu.is_pipeline_last_stage()
            this_model = model_provider_func(
                pre_process=pre_process,
                post_process=post_process
            )
            model.append(this_model)
    else:
        pre_process = mpu.is_pipeline_first_stage()
        post_process = mpu.is_pipeline_last_stage()
        model = model_provider_func(
            pre_process=pre_process,
            post_process=post_process
        )

    if not isinstance(model, list):
        model = [model]

    # Set tensor model parallel attributes if not set.
    # Only parameters that are already tensor model parallel have these
    # attributes set for them. We should make sure the default attributes
    # are set for all params so the optimizer can use them.
    for model_module in model:
        for param in model_module.parameters():
            mpu.set_defaults_if_not_set_tensor_model_parallel_attributes(param)

    # Print number of parameters.
    if mpu.get_data_parallel_rank() == 0:
        print(' > number of parameters on (tensor, pipeline) '
              'model parallel rank ({}, {}): {}'.format(
                  mpu.get_tensor_model_parallel_rank(),
                  mpu.get_pipeline_model_parallel_rank(),
                  sum([sum([p.ds_numel if hasattr(p, 'ds_id') else p.nelement()
                            for p in model_module.parameters()])
                       for model_module in model])), flush=True)

    if args.deepspeed:
        return model

    # GPU allocation.
    for model_module in model:
        model_module.to(get_current_device())

    # Fp16 conversion.
    if args.fp16 or args.bf16:
        model = [Float16Module(model_module, args) for model_module in model]

    if args.DDP_impl == 'torch':
        i = get_current_device_index()
        device = get_current_device()
        model = [torchDDP(model_module, device_ids=[i], output_device=device,
                          process_group=mpu.get_data_parallel_group())
                 for model_module in model]
        return model

    if args.DDP_impl == 'local':
        model = [LocalDDP(model_module,
                          args.accumulate_allreduce_grads_in_fp32,
                          args.use_contiguous_buffers_in_ddp)
                 for model_module in model]
        return model

    raise NotImplementedError('Unknown DDP implementation specified: {}. '
                              'Exiting.'.format(args.DDP_impl))


def get_learning_rate_scheduler(optimizer):
    """Build the learning rate scheduler."""
    args = get_args()

    # Iteration-based training.
    if args.train_iters:
        if args.lr_decay_iters is None:
            args.lr_decay_iters = args.train_iters
        decay_steps = args.lr_decay_iters * args.global_batch_size
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_iters * args.global_batch_size
    # Sample-based training.
    elif args.train_samples:
        # We need to set training iters for later use. Technically
        # we need to adjust the training samples too (due to last
        # batch being incomplete) but we leave it as is for now.
        update_train_iters(args)
        if args.lr_decay_samples is None:
            args.lr_decay_samples = args.train_samples
        decay_steps = args.lr_decay_samples
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_samples
    else:
        raise Exception(
            'either train-iters or train-samples should be provided.')

    lr_scheduler = AnnealingLR(
        optimizer,
        max_lr=args.lr,
        min_lr=args.min_lr,
        warmup_steps=warmup_steps,
        decay_steps=decay_steps,
        decay_style=args.lr_decay_style,
        use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler,
        override_lr_scheduler=args.override_lr_scheduler)
    return lr_scheduler
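

# With DeepSpeed universal checkpoints, loading restores the optimizer's
# high-precision ("hp") master parameters; update_lp_params() then pushes
# those values back into the low-precision ("lp") model copies so weights
# and master parameters agree before training resumes.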
def sync_hp_to_lp(optimizer):
    optimizer.update_lp_params()


def load_model_weights_only(model_provider_func):
    """Setup model and optimizer."""
    args = get_args()
    print_rank_0('***>>>>> Args:{}'.format(args))

    model = get_model(model_provider_func)
    optimizer = None
    lr_scheduler = None

    if args.deepspeed:
        with open(args.deepspeed_config, 'r') as fd:
            ds_config = json.load(fd)
        # When loading just the model weights, ZeRO can be disabled.
        if 'zero_optimization' in ds_config:
            del ds_config['zero_optimization']
        model, optimizer, _, lr_scheduler = deepspeed.initialize(
            model=model[0],
            config=ds_config
        )
        assert not isinstance(model, deepspeed.PipelineEngine), \
            'Weight loading only mode is not supported in pipeline parallelism yet.'
        model = [model]

    print_datetime('before load checkpoint')
    if args.load is not None and not args.device_warmup:
        iteration = load_checkpoint(model, optimizer, lr_scheduler, strict=True,
                                    load_only_weights=True)
    print_datetime('after load checkpoint weights')

    return model, optimizer, lr_scheduler


def setup_model_and_optimizer(model_provider_func, teacher=False):
    """Setup model and optimizer."""
    args = get_args()

    model = get_model(model_provider_func)

    # Initialize the compression here.
    student_global_steps = 0
    if args.kd or args.mos:
        model, _, _, _ = deepspeed.initialize(
            model=model[0],
            args=args,
            mpu=mpu if args.no_pipeline_parallel else None
        )
        model = [model]
        if args.load is not None and not args.device_warmup:
            args.iteration = load_checkpoint(model, None, None, strict=False)
        else:
            args.iteration = 0
        student_global_steps = model[0].global_steps
        print_rank_0('***>>>>> Student model, global step:{}'.format(student_global_steps))

    if args.compression_training:
        model, _, _, _ = deepspeed.initialize(
            model=model[0],
            args=args,
            mpu=mpu if args.no_pipeline_parallel else None
        )
        model = [model]
        model = [init_compression(model[0].module, args.deepspeed_config, mpu)]

    unwrapped_model = unwrap_model(model,
                                   (torchDDP, LocalDDP, Float16Module))

    if args.inference:
        optimizer = None
        lr_scheduler = None
    else:
        if teacher:
            optimizer = None
        else:
            optimizer = get_megatron_optimizer(unwrapped_model)
        lr_scheduler = get_learning_rate_scheduler(optimizer)

    if args.deepspeed:
        print_rank_0("DeepSpeed is enabled.")
        model, optimizer, _, lr_scheduler = deepspeed.initialize(
            model=model[0],
            optimizer=optimizer,
            args=args,
            config=args.deepspeed_configuration,
            lr_scheduler=lr_scheduler,
            mpu=mpu if args.no_pipeline_parallel else None
        )
        if isinstance(model, deepspeed.PipelineEngine):
            # Hack to get batch_fn from pretrain_gpt.py.
            model.set_batch_fn(model.module._megatron_batch_fn)
            assert model.grid.get_pipe_parallel_rank() == mpu.get_pipeline_model_parallel_rank()
            assert model.grid.get_slice_parallel_rank() == mpu.get_tensor_model_parallel_rank()
            assert model.grid.get_data_parallel_rank() == mpu.get_data_parallel_rank()
        model = [model]

    # Compression has its own checkpoint loading path (e.g., loading both
    # teacher and student models). So if compression is enabled, we skip the
    # following checkpoint loading.
    no_post_init_checkpoint_loading = args.kd or args.mos
    if not no_post_init_checkpoint_loading:
        if args.load is not None and not args.device_warmup:
            timers = get_timers()
            # Extra barrier is added to make sure all ranks report the
            # max time.
            torch.distributed.barrier()
            timers('load-checkpoint').start()
            args.iteration = load_checkpoint(model, optimizer, lr_scheduler)
            torch.distributed.barrier()
            timers('load-checkpoint').stop()
            timers.log(['load-checkpoint'])
            # hp -> lp
            if args.deepspeed and args.universal_checkpoint:
                sync_hp_to_lp(optimizer)
        else:
            args.iteration = 0
    else:
        model[0].global_steps = student_global_steps

    # We only support local DDP with multiple micro-batches.
    if len(model) > 1 or mpu.get_pipeline_model_parallel_world_size() > 1:
        assert args.DDP_impl == 'local'

    # Get model without FP16 and/or TorchDDP wrappers.
    if args.iteration == 0 and len(unwrapped_model) == 1 \
            and hasattr(unwrapped_model[0], 'init_state_dict_from_bert'):
        print_rank_0("Initializing ICT from pretrained BERT model")
        unwrapped_model[0].init_state_dict_from_bert()
        if args.fp16:
            optimizer.reload_model_params()

    return model, optimizer, lr_scheduler


def deepspeed_train_step(data_iterator, model, args):
    skipped_iter = 0
    num_zeros_in_grad = 0
    assert isinstance(model, deepspeed.PipelineEngine)
    from habana_frameworks.torch.hpex.experimental.transformer_engine import fp8_autocast
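    # fp8_autocast scopes FP8 execution for the transformer_engine layers
    # running inside train_batch(); with enabled=False it acts as a
    # pass-through, so the same code path serves bf16 and fp8 runs (a
    # reading of how the flag is used here, not of separate documentation).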
    with fp8_autocast(enabled=args.use_hpu_fp8_transformer_engine, fp8_recipe=get_hpu_fp8_recipe()):
        loss = model.train_batch(data_iter=data_iterator)
    grad_norm = model.get_global_grad_norm()
    return {'lm loss': loss}, skipped_iter, grad_norm, num_zeros_in_grad


def train_step(forward_step_func, data_iterator,
               model, optimizer, lr_scheduler, teacher_model=None):
    """Single training step."""
    args = get_args()
    timers = get_timers()

    if args.deepspeed and args.ds_pipeline_enabled:
        return deepspeed_train_step(data_iterator, model[0], args)

    # Set grad to zero.
    if not args.deepspeed:
        if args.DDP_impl == 'local' and args.use_contiguous_buffers_in_ddp:
            for partition in model:
                partition.zero_grad_buffer()
        else:
            optimizer.zero_grad()

    if mpu.get_pipeline_model_parallel_world_size() > 1:
        if args.virtual_pipeline_model_parallel_size is not None:
            forward_backward_func = forward_backward_pipelining_with_interleaving
            assert get_num_microbatches() % args.pipeline_model_parallel_size == 0, \
                'number of microbatches is not divisible by pipeline-parallel ' \
                'size when using interleaved schedule'
        else:
            forward_backward_func = forward_backward_pipelining_without_interleaving
    else:
        forward_backward_func = forward_backward_no_pipelining
    losses_reduced = forward_backward_func(
        forward_step_func, data_iterator, model,
        optimizer, timers, forward_only=False, teacher_model=teacher_model)

    # All-reduce if needed.
    if not args.deepspeed and args.DDP_impl == 'local':
        timers('backward-params-all-reduce').start()
        for model_module in model:
            model_module.allreduce_gradients()
        timers('backward-params-all-reduce').stop()

    # All-reduce word_embeddings' grad across first and last stages to ensure
    # that word_embeddings parameters stay in sync.
    # This should only run for models that support pipelined model parallelism
    # (BERT and GPT-2).
    timers('backward-embedding-all-reduce').start()
    if not args.deepspeed:
        if (mpu.is_pipeline_first_stage(ignore_virtual=True) or
                mpu.is_pipeline_last_stage(ignore_virtual=True)) and \
                mpu.get_pipeline_model_parallel_world_size() > 1:
            if mpu.is_pipeline_first_stage(ignore_virtual=True):
                unwrapped_model = model[0]
            elif mpu.is_pipeline_last_stage(ignore_virtual=True):
                unwrapped_model = model[-1]
            unwrapped_model = unwrap_model(
                unwrapped_model, (torchDDP, LocalDDP, Float16Module))

            if unwrapped_model.share_word_embeddings:
                word_embeddings_weight = unwrapped_model.word_embeddings_weight()
                if args.DDP_impl == 'local':
                    grad = word_embeddings_weight.main_grad
                else:
                    grad = word_embeddings_weight.grad
                torch.distributed.all_reduce(grad, group=mpu.get_embedding_group())
    timers('backward-embedding-all-reduce').stop()

    # Update parameters.
    timers('optimizer').start()
    if args.deepspeed:
        increment = get_num_microbatches() * \
            args.micro_batch_size * \
            args.data_parallel_size
        model[0].step(lr_kwargs={'increment': increment})
        update_successful = model[0].was_step_applied()
    else:
        update_successful, grad_norm, num_zeros_in_grad = optimizer.step()
    timers('optimizer').stop()

    # Update learning rate.
    if args.deepspeed:
        skipped_iter = 0
        grad_norm = None
        num_zeros_in_grad = None

        loss_reduced = {}
        for key in losses_reduced[0]:
            losses_reduced_for_key = [x[key] for x in losses_reduced]
            loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
        return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
    else:
        if update_successful:
            increment = get_num_microbatches() * \
                args.micro_batch_size * \
                args.data_parallel_size
            lr_scheduler.step(increment=increment)
            skipped_iter = 0
        else:
            skipped_iter = 1

        if mpu.is_pipeline_last_stage(ignore_virtual=True):
            # Average loss across microbatches.
            loss_reduced = {}
            for key in losses_reduced[0]:
                losses_reduced_for_key = [x[key] for x in losses_reduced]
                loss_reduced[key] = sum(losses_reduced_for_key) / len(losses_reduced_for_key)
            return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
    return {}, skipped_iter, grad_norm, num_zeros_in_grad


def training_log(loss_dict, total_loss_dict, learning_rate, iteration,
                 loss_scale, report_memory_flag, skipped_iter,
                 grad_norm, params_norm, num_zeros_in_grad,
                 model=None, optimizer=None):
    """Log training information such as losses, timing, ...."""
    args = get_args()
    timers = get_timers()
    writer = get_tensorboard_writer()

    # Advanced, skipped, and Nan iterations.
    advanced_iters_key = 'advanced iterations'
    skipped_iters_key = 'skipped iterations'
    nan_iters_key = 'nan iterations'
    # Advanced iterations.
    if not skipped_iter:
        total_loss_dict[advanced_iters_key] = total_loss_dict.get(
            advanced_iters_key, 0) + 1
    else:
        if advanced_iters_key not in total_loss_dict:
            total_loss_dict[advanced_iters_key] = 0
    # Skipped iterations.
    total_loss_dict[skipped_iters_key] = total_loss_dict.get(
        skipped_iters_key, 0) + skipped_iter
    # Update losses and set nan iterations.
    got_nan = False
    for key in loss_dict:
        if not skipped_iter:
            total_loss_dict[key] = total_loss_dict.get(
                key, torch.FloatTensor([0.0])).to(get_current_device()) + loss_dict[key]
        else:
            value = loss_dict[key].float().sum().item()
            is_nan = value == float('inf') or \
                value == -float('inf') or \
                value != value
            got_nan = got_nan or is_nan
    total_loss_dict[nan_iters_key] = total_loss_dict.get(
        nan_iters_key, 0) + int(got_nan)

    # Logging.
    timers_to_log = []

    def add_to_logging(name):
        if name in timers.timers:
            timers_to_log.append(name)
    add_to_logging('forward-compute')
    add_to_logging('forward-recv')
    add_to_logging('forward-send')
    add_to_logging('forward-backward-send-forward-backward-recv')
    add_to_logging('backward-compute')
    add_to_logging('backward-recv')
    add_to_logging('backward-send')
    add_to_logging('backward-send-forward-recv')
    add_to_logging('backward-send-backward-recv')
    add_to_logging('backward-params-all-reduce')
    add_to_logging('backward-embedding-all-reduce')
    add_to_logging('optimizer-copy-to-main-grad')
    add_to_logging('optimizer-unscale-and-check-inf')
    add_to_logging('optimizer-clip-main-grad')
    add_to_logging('optimizer-copy-main-to-model-params')
    add_to_logging('optimizer')
    add_to_logging('batch-generator')

    # Calculate batch size.
    batch_size = args.micro_batch_size * args.data_parallel_size * \
        get_num_microbatches()
    total_iterations = total_loss_dict[advanced_iters_key] + \
        total_loss_dict[skipped_iters_key]

    # Tensorboard values.
    if writer and (iteration % args.tensorboard_log_interval == 0) and \
            is_last_rank():
        writer.add_scalar('steps-vs-samples/y=steps,x=samples', iteration, args.consumed_train_samples)
        writer.add_scalar('steps-vs-samples/y=samples,x=steps', args.consumed_train_samples, iteration)
        writer.add_scalar('steps-vs-tokens/y=steps,x=tokens', iteration, args.consumed_train_tokens)
        writer.add_scalar('steps-vs-tokens/y=tokens,x=steps', args.consumed_train_tokens, iteration)
        if args.log_learning_rate_to_tensorboard:
            writer.add_scalar('learning-rate/learning-rate', learning_rate, iteration)
            writer.add_scalar('learning-rate/learning-rate vs samples', learning_rate,
                              args.consumed_train_samples)
            writer.add_scalar('learning-rate/learning-rate vs tokens', learning_rate,
                              args.consumed_train_tokens)
            writer.add_scalar('learning-rate', learning_rate, iteration)
            writer.add_scalar('learning-rate vs samples', learning_rate,
                              args.consumed_train_samples)
        if args.log_batch_size_to_tensorboard:
            writer.add_scalar('batch-size/batch-size', batch_size, iteration)
            writer.add_scalar('batch-size/batch-size vs samples', batch_size,
                              args.consumed_train_samples)
        for key in loss_dict:
            writer.add_scalar(f"lm-loss-training/{key}", loss_dict[key], iteration)
            writer.add_scalar(f"lm-loss-training/{key}" + ' vs samples', loss_dict[key],
                              args.consumed_train_samples)
            writer.add_scalar(f"lm-loss-training/{key}" + ' vs tokens', loss_dict[key],
                              args.consumed_train_tokens)
            writer.add_scalar(key, loss_dict[key], iteration)
            writer.add_scalar(key + ' vs samples', loss_dict[key],
                              args.consumed_train_samples)
            writer.add_scalar(key + ' vs tokens', loss_dict[key],
                              args.consumed_train_tokens)
        if args.log_loss_scale_to_tensorboard:
            writer.add_scalar('loss-scale/loss-scale', loss_scale, iteration)
            writer.add_scalar('loss-scale/loss-scale vs samples', loss_scale,
                              args.consumed_train_samples)
            writer.add_scalar('loss-scale/loss-scale vs tokens', loss_scale,
                              args.consumed_train_tokens)
        if grad_norm is not None:
            writer.add_scalar('grad-norm/grad-norm', grad_norm, iteration)
            writer.add_scalar('grad-norm/grad-norm vs samples', grad_norm,
                              args.consumed_train_samples)
            writer.add_scalar('grad-norm/grad-norm vs tokens', grad_norm,
                              args.consumed_train_tokens)
            writer.add_scalar('grad-norm', grad_norm, iteration)
            writer.add_scalar('grad-norm vs samples', grad_norm,
                              args.consumed_train_samples)
        if num_zeros_in_grad is not None:
            writer.add_scalar('num-zeros/num-zeros', num_zeros_in_grad, iteration)
            writer.add_scalar('num-zeros/num-zeros vs samples', num_zeros_in_grad,
                              args.consumed_train_samples)
            writer.add_scalar('num-zeros/num-zeros vs tokens', num_zeros_in_grad,
                              args.consumed_train_tokens)
        if params_norm is not None:
            writer.add_scalar('params-norm/params-norm', params_norm, iteration)
            writer.add_scalar('params-norm/params-norm vs samples', params_norm,
                              args.consumed_train_samples)
            writer.add_scalar('params-norm/params-norm vs tokens', params_norm,
                              args.consumed_train_tokens)
        if args.curriculum_learning:
            writer.add_scalar('curriculum_seqlen', args.curriculum_seqlen,
                              iteration)
        if args.log_timers_to_tensorboard:
            timers.write(timers_to_log, writer, iteration,
                         normalizer=total_iterations)

    if iteration % args.tensorboard_log_interval == 0:
        # This block writes various optimizer states to Tensorboard. The
        # feature may consume extra GPU memory, so it is disabled by default.
        if args.log_optimizer_states_to_tensorboard and optimizer is not None:
            opt_stats = [0.0] * 8
            opt_stats_2 = [0.0] * 4
            for _, group in enumerate(optimizer.param_groups):
                for _, param in enumerate(group['params']):
                    opt_stats[0] += (torch.norm(optimizer.state[param]['exp_avg_sq']).item())**2
                    opt_stats[1] += (torch.norm(optimizer.state[param]['exp_avg_sq'].sqrt()).item())**2
                    opt_stats[2] += (torch.norm(optimizer.state[param]['exp_avg']).item())**2
                    opt_stats[3] += (torch.norm(param).item())**2
                    opt_stats[4] += torch.norm(optimizer.state[param]['exp_avg_sq'], p=1).item()
                    opt_stats[5] += torch.norm(optimizer.state[param]['exp_avg_sq'].sqrt(), p=1).item()
                    opt_stats[6] += torch.norm(optimizer.state[param]['exp_avg'], p=1).item()
                    opt_stats[7] += torch.norm(param, p=1).item()
                    opt_stats_2[0] = max(opt_stats_2[0], abs(optimizer.state[param]['exp_avg_sq'].max().item()),
                                         abs(optimizer.state[param]['exp_avg_sq'].min().item()))
                    opt_stats_2[1] = max(opt_stats_2[1],
                                         optimizer.state[param]['exp_avg_sq'].sqrt().abs_().max().item())
                    opt_stats_2[2] = max(opt_stats_2[2], abs(optimizer.state[param]['exp_avg'].max().item()),
                                         abs(optimizer.state[param]['exp_avg'].min().item()))
                    opt_stats_2[3] = max(opt_stats_2[3], abs(param.max().item()), abs(param.min().item()))
            # print('step {} rank {} before sync opt_stats {}, {}'.format(iteration, torch.distributed.get_rank(), opt_stats_2, opt_stats))
            if args.zero_stage > 0:
                # ZeRO partitions optimizer states.
                opt_stats = torch.FloatTensor(opt_stats).to(get_current_device())
                torch.distributed.all_reduce(opt_stats, group=mpu.get_data_parallel_group())
                opt_stats_2 = torch.FloatTensor(opt_stats_2).to(get_current_device())
                torch.distributed.all_reduce(opt_stats_2, op=torch.distributed.ReduceOp.MAX,
                                             group=mpu.get_data_parallel_group())
            if args.tensor_model_parallel_size > 1:
                opt_stats = torch.FloatTensor(opt_stats).to(get_current_device())
                torch.distributed.all_reduce(
                    opt_stats, group=mpu.get_tensor_model_parallel_group())
                opt_stats_2 = torch.FloatTensor(opt_stats_2).to(get_current_device())
                torch.distributed.all_reduce(opt_stats_2, op=torch.distributed.ReduceOp.MAX,
                                             group=mpu.get_tensor_model_parallel_group())
            if args.pipeline_model_parallel_size > 1:
                opt_stats = torch.FloatTensor(opt_stats).to(get_current_device())
                torch.distributed.all_reduce(
                    opt_stats, group=mpu.get_pipeline_model_parallel_group())
                opt_stats_2 = torch.FloatTensor(opt_stats_2).to(get_current_device())
                torch.distributed.all_reduce(opt_stats_2, op=torch.distributed.ReduceOp.MAX,
                                             group=mpu.get_pipeline_model_parallel_group())
            # print('step {} rank {} after sync opt_stats {}, {}'.format(iteration, torch.distributed.get_rank(), opt_stats_2, opt_stats))
            if writer and is_last_rank():
                writer.add_scalar('optimizer/variance_l2 vs tokens', opt_stats[0]**0.5, args.consumed_train_tokens)
                writer.add_scalar('optimizer/variance_sqrt_l2 vs tokens', opt_stats[1]**0.5, args.consumed_train_tokens)
                writer.add_scalar('optimizer/momentum_l2 vs tokens', opt_stats[2]**0.5, args.consumed_train_tokens)
                writer.add_scalar('optimizer/weight_l2 vs tokens', opt_stats[3]**0.5, args.consumed_train_tokens)
                writer.add_scalar('optimizer/variance_l1 vs tokens', opt_stats[4], args.consumed_train_tokens)
                writer.add_scalar('optimizer/variance_sqrt_l1 vs tokens', opt_stats[5], args.consumed_train_tokens)
                writer.add_scalar('optimizer/momentum_l1 vs tokens', opt_stats[6], args.consumed_train_tokens)
                writer.add_scalar('optimizer/weight_l1 vs tokens', opt_stats[7], args.consumed_train_tokens)
                writer.add_scalar('optimizer/variance_abs_max vs tokens', opt_stats_2[0], args.consumed_train_tokens)
                writer.add_scalar('optimizer/variance_sqrt_abs_max vs tokens',
                                  opt_stats_2[1], args.consumed_train_tokens)
                writer.add_scalar('optimizer/momentum_abs_max vs tokens', opt_stats_2[2], args.consumed_train_tokens)
                writer.add_scalar('optimizer/weight_abs_max vs tokens', opt_stats_2[3], args.consumed_train_tokens)

                writer.add_scalar('optimizer/variance_l2', opt_stats[0]**0.5, iteration)
                writer.add_scalar('optimizer/variance_sqrt_l2', opt_stats[1]**0.5, iteration)
                writer.add_scalar('optimizer/momentum_l2', opt_stats[2]**0.5, iteration)
                writer.add_scalar('optimizer/weight_l2', opt_stats[3]**0.5, iteration)
                writer.add_scalar('optimizer/variance_l1', opt_stats[4], iteration)
                writer.add_scalar('optimizer/variance_sqrt_l1', opt_stats[5], iteration)
                writer.add_scalar('optimizer/momentum_l1', opt_stats[6], iteration)
                writer.add_scalar('optimizer/weight_l1', opt_stats[7], iteration)
                writer.add_scalar('optimizer/variance_abs_max', opt_stats_2[0], iteration)
                writer.add_scalar('optimizer/variance_sqrt_abs_max', opt_stats_2[1], iteration)
                writer.add_scalar('optimizer/momentum_abs_max', opt_stats_2[2], iteration)
                writer.add_scalar('optimizer/weight_abs_max', opt_stats_2[3], iteration)

    if iteration % args.log_interval == 0:
        elapsed_time = timers('interval-time').elapsed()
        elapsed_time_per_iteration = elapsed_time / total_iterations
        seq_len = args.curriculum_seqlen if args.curriculum_learning else args.seq_length
        hidden_size = args.hidden_size
        num_layers = args.num_layers
        vocab_size = args.padded_vocab_size
        samples_per_sec, tflops, approx_parameters_in_billions = throughput_calculator(
            model, args, elapsed_time, total_iterations)

        # Compute throughput.
        samples_per_sec_per_replica = samples_per_sec / args.data_parallel_size
        tokens_per_sec = samples_per_sec * seq_len
        tokens_per_sec_per_replica = tokens_per_sec / args.data_parallel_size

        # Only the last rank process has a non-None _GLOBAL_TENSORBOARD_WRITER.
        if writer and is_last_rank():
            if args.log_timers_to_tensorboard:
                writer.add_scalar('iteration-time/iteration-time',
                                  elapsed_time_per_iteration, iteration)
                writer.add_scalar('iteration-time/iteration-time vs samples',
                                  elapsed_time_per_iteration, args.consumed_train_samples)
                writer.add_scalar('iteration-time/iteration-time vs tokens',
                                  elapsed_time_per_iteration, args.consumed_train_tokens)
        log_string = ' iteration {:8d}/{:8d} |'.format(
            iteration, args.train_iters)
        log_string += ' consumed samples: {:12d} |'.format(
            args.consumed_train_samples)
        log_string += ' consumed tokens: {:12d} |'.format(
            args.consumed_train_tokens)
        log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(
            elapsed_time_per_iteration * 1000.0)
        log_string += ' learning rate: {:.3E} |'.format(learning_rate)
        log_string += ' global batch size: {:5d} |'.format(batch_size)
        for key in total_loss_dict:
            if key not in [advanced_iters_key, skipped_iters_key,
                           nan_iters_key]:
                avg = total_loss_dict[key].item() / \
                    float(max(1, total_loss_dict[advanced_iters_key]))
                if avg > 0.0:
                    log_string += ' {}: {:.6E} |'.format(key, avg)
                total_loss_dict[key] = torch.FloatTensor([0.0]).to(get_current_device())
        log_string += ' loss scale: {:.1f} |'.format(loss_scale)
        if grad_norm is not None:
            log_string += ' grad norm: {:.3f} |'.format(grad_norm)
        if num_zeros_in_grad is not None:
            log_string += ' num zeros: {:.1f} |'.format(num_zeros_in_grad)
        if params_norm is not None:
            log_string += ' params norm: {:.3f} |'.format(params_norm)
        if args.curriculum_learning:
            log_string += ' curriculum seqlen: {:5d} |'.format(args.curriculum_seqlen)
        log_string += ' number of skipped iterations: {:3d} |'.format(
            total_loss_dict[skipped_iters_key])
        log_string += ' number of nan iterations: {:3d} |'.format(
            total_loss_dict[nan_iters_key])
        log_string += ' samples per second: {:.3f} |'.format(samples_per_sec)
        log_string += ' TFLOPs: {:.2f} |'.format(tflops)
        total_loss_dict[advanced_iters_key] = 0
        total_loss_dict[skipped_iters_key] = 0
        total_loss_dict[nan_iters_key] = 0
        print_rank_last(log_string)
        if report_memory_flag and learning_rate > 0.:
            # Report memory after optimizer state has been initialized.
            report_memory('(after {} iterations)'.format(iteration))
            report_memory_flag = False
        timers.log(timers_to_log, normalizer=args.log_interval)

    return report_memory_flag


def save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler):
    timers = get_timers()
    # Extra barrier is added to make sure
    # all ranks report the max time.
    torch.distributed.barrier()
    timers('save-checkpoint').start()
    save_checkpoint(iteration, model, optimizer, lr_scheduler)
    torch.distributed.barrier()
    timers('save-checkpoint').stop()
    checkpoint_throughput_calculator(model, timers('save-checkpoint').elapsed(reset=False))
    timers.log(['save-checkpoint'])


def train(forward_step_func, model, optimizer, lr_scheduler,
          train_data_iterator, valid_data_iterator, teacher_model=None, mllogger=None):
    """Train the model function."""
    args = get_args()
    timers = get_timers()

    # Write args to tensorboard.
    write_args_to_tensorboard()

    setup_profiler(args, get_current_device())

    # Turn on training mode which enables dropout.
    for model_module in model:
        model_module.train()

    # Tracking loss.
    total_loss_dict = {}

    # Used to control the evaluation frequency.
    first_iter = args.iteration

    # Iterations.
    iteration = args.iteration

    timers('interval-time').start()
    print_datetime('before the start of training step')
    report_memory_flag = True

    # ClearML init:
    if args.clearml_config_path is not None and is_last_rank():
        if not os.path.exists(args.clearml_config_path):
            raise Exception(f"Could not access {args.clearml_config_path}")
        if args.clearml_exp_name is not None:
            exp_name = args.clearml_exp_name
        else:
            exp_name = time.strftime("%Y_%m_%d_%H_%M")
        os.environ["CLEARML_CONFIG_FILE"] = args.clearml_config_path
        from clearml import Task
        if args.clearml_continue_exp:
            task = Task.get_task(project_name="Megatron-DeepSpeed",
                                 task_name=args.clearml_exp_name)
            clearml_task = Task.init("Megatron-DeepSpeed", exp_name,
                                     continue_last_task=task.task_id)
        else:
            clearml_task = Task.init("Megatron-DeepSpeed", exp_name)

    if args.tensor_logger_max_iter > 0:
        from deepspeed.tools.tensor_logger import TensorLogger, save_logged_tensors
        tensor_logger = TensorLogger(model[0].module,
                                     log_activations_enabled=args.log_fwd_activations,
                                     max_iterations=args.tensor_logger_max_iter,
                                     log_grads_enabled=args.log_bwd_grads,
                                     log_inputs_enabled=args.log_model_inputs,
                                     prefix=None)
    else:
        tensor_logger = None

    while iteration < args.train_iters and (args.train_tokens is None or
                                            args.consumed_train_tokens < args.train_tokens):
        trigger(on_step_begin)
        update_num_microbatches(args.consumed_train_samples)
        if args.deepspeed:
            # Inform deepspeed of any batch size changes.
            global_batch_size = mpu.get_data_parallel_world_size() * \
                args.micro_batch_size * \
                get_num_microbatches()
            model[0].set_train_batch_size(global_batch_size)

        if args.curriculum_learning and not args.no_pipeline_parallel:
            args.curriculum_seqlen = args.curriculum_scheduler.update_difficulty(
                args.iteration + 1)
        with tensor_logger.log_iteration(iteration) if tensor_logger else nullcontext():
            loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \
                train_step(forward_step_func,
                           train_data_iterator,
                           model,
                           optimizer,
                           lr_scheduler,
                           teacher_model=teacher_model)
        iteration += 1
        args.iteration = iteration
        new_samples = mpu.get_data_parallel_world_size() * \
            args.micro_batch_size * \
            get_num_microbatches()
        args.consumed_train_samples += new_samples
        if args.curriculum_learning:
            args.consumed_train_tokens += new_samples * args.curriculum_seqlen
        else:
            args.consumed_train_tokens += new_samples * args.seq_length

        # Logging.
        loss_scale = 0
        if args.fp16:
            if args.deepspeed:
                loss_scale = model[0].optimizer.cur_scale
            else:
                loss_scale = optimizer.get_loss_scale().item()
        params_norm = None
        if args.log_params_norm:
            params_norm = calc_params_l2_norm(model)
        report_memory_flag = training_log(loss_dict, total_loss_dict,
                                          optimizer.param_groups[0]['lr'],
                                          iteration, loss_scale,
                                          report_memory_flag, skipped_iter,
                                          grad_norm, params_norm, num_zeros_in_grad,
                                          model, optimizer)

        # Autoresume.
        if args.adlr_autoresume and \
                (iteration % args.adlr_autoresume_interval == 0):
            check_adlr_autoresume_termination(iteration, model, optimizer,
                                              lr_scheduler)

        # Evaluation.
        if args.eval_interval and (iteration - first_iter) % args.eval_interval == 0 and \
                args.do_valid:
            prefix = 'iteration {}'.format(iteration)
            eval_loss = evaluate_and_print_results(prefix, forward_step_func,
                                                   valid_data_iterator, model,
                                                   iteration, False, mllogger=mllogger)
            # Exiting based on eval loss.
            if args.eval_loss_exit_value is not None and eval_loss <= args.eval_loss_exit_value:
                if args.save:
                    save_checkpoint_and_time(iteration, model, optimizer,
                                             lr_scheduler)
                torch.distributed.barrier()
                print_datetime(f"Reached target loss value: {args.eval_loss_exit_value}. "
                               f"Stopping the training at iteration: {iteration} with loss: {eval_loss}")
                if mllogger is not None:
                    mllogger.event(key=mllogger.constants.TRAIN_SAMPLES,
                                   value=(args.consumed_train_samples - args.ext_lr_steps)
                                   * args.seq_length, sync=False)
                    mllogger.end(key=mllogger.constants.BLOCK_STOP,
                                 metadata={'first_epoch_num': 0},
                                 sync=False)
                    mllogger.end(key=mllogger.constants.EPOCH_STOP,
                                 metadata={'epoch_num': (args.consumed_train_samples - args.ext_lr_steps)
                                           * args.seq_length}, sync=False)
                    status = 'success'
                    mllogger.log_run_stop(status)
                sys.exit()

        # Checkpointing.
        saved_checkpoint = False
        if args.save and args.save_interval and \
                iteration % args.save_interval == 0:
            save_checkpoint_and_time(iteration, model, optimizer,
                                     lr_scheduler)
            saved_checkpoint = True

        # Exiting based on duration.
        if args.exit_duration_in_mins:
            train_time = (time.time() - _TRAIN_START_TIME) / 60.0
            done_cuda = torch.IntTensor(
                [train_time > args.exit_duration_in_mins]).to(get_current_device())
            torch.distributed.all_reduce(
                done_cuda, op=torch.distributed.ReduceOp.MAX)
            done = done_cuda.item()
            if done:
                status = 'fail'
                if mllogger is not None:
                    mllogger.log_run_stop(status)
                    mllogger.event(key=mllogger.constants.TRAIN_SAMPLES,
                                   value=(args.consumed_train_samples - args.ext_lr_steps) * args.seq_length,
                                   sync=False)
                if args.save and not saved_checkpoint:
                    save_checkpoint_and_time(iteration, model, optimizer,
                                             lr_scheduler)
                print_datetime('exiting program after {} minutes'.format(train_time))
                sys.exit()

        # Exiting based on iterations.
        if args.exit_interval and iteration % args.exit_interval == 0:
            if args.save and not saved_checkpoint:
                save_checkpoint_and_time(iteration, model, optimizer,
                                         lr_scheduler)
            torch.distributed.barrier()
            print_datetime('exiting program at iteration {}'.format(iteration))
            sys.exit()

        # Exiting based on kill-switch.
        if found_kill_switch():
            if args.save and not saved_checkpoint:
                save_checkpoint_and_time(iteration, model, optimizer,
                                         lr_scheduler)
            print_datetime(f"Detected kill switch at {args.kill_switch_path}, "
                           f"iteration={iteration}. Exiting")
            sys.exit()

        if args.tensor_logger_max_iter > 0:
            save_logged_tensors(tensor_logger, args.tensor_logger_path, args.rank, iteration)
        trigger(on_step_end)

    return iteration


def evaluate(forward_step_func, data_iterator, model, verbose=False):
    """Evaluation."""
    args = get_args()

    # Turn on evaluation mode which disables dropout.
    for model_module in model:
        model_module.eval()

    if args.curriculum_learning and not args.no_pipeline_parallel:
        # When curriculum learning is used with pipeline parallelism, we need
        # this logic to ensure that the eval data is not truncated. If there
        # is a seqlen change due to that, we need to call
        # reset_activation_shape() to reset some buffers in deepspeed pipeline
        # engine.
        if args.curriculum_seqlen < args.seq_length:
            args.curriculum_seqlen = args.seq_length
            model[0].reset_activation_shape()
    if args.eval_micro_batch_size != args.micro_batch_size:
        reset_cached_broadcast_sizes()
        model[0].reset_activation_shape()

    total_loss_dict = {}

    with torch.no_grad():
        iteration = 0
        total_iterations = args.eval_iters
        if args.eval_iters == -1:
            print_rank_0(f"Evaluation on the entire set as eval-iters is set to {args.eval_iters}")
            samples_per_iteration = mpu.get_data_parallel_world_size() \
                * args.eval_micro_batch_size \
                * get_num_eval_microbatches()
            total_iterations = math.ceil(args.eval_total_samples / samples_per_iteration)
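            # Hypothetical sizing example (numbers invented for this
            # comment): with 11,590,004 eval samples, data-parallel size 64,
            # eval_micro_batch_size 2 and 8 eval microbatches,
            # samples_per_iteration = 64 * 2 * 8 = 1024 and
            # total_iterations = ceil(11590004 / 1024) = 11319; the final
            # iteration then runs only the leftover microbatches.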
            print_rank_0(
                f"Evaluation Iterations: {total_iterations}, Total Eval Samples: {args.eval_total_samples}, "
                f"samples per iteration: {samples_per_iteration}")
            args.consumed_valid_samples = 0
        num_eval_microbatches = get_num_eval_microbatches()
        while iteration < total_iterations:
            iteration += 1
            if iteration == total_iterations and args.eval_iters == -1:
                num_eval_microbatches = math.ceil((args.eval_total_samples - args.consumed_valid_samples) /
                                                  (mpu.get_data_parallel_world_size() * args.eval_micro_batch_size))
            if verbose and iteration % args.log_interval == 0:
                print_rank_0('Evaluating iter {}/{}'.format(iteration,
                                                            total_iterations))
            if mpu.get_pipeline_model_parallel_world_size() > 1:
                if args.virtual_pipeline_model_parallel_size is not None:
                    forward_backward_func = forward_backward_pipelining_with_interleaving
                else:
                    forward_backward_func = forward_backward_pipelining_without_interleaving
            else:
                forward_backward_func = forward_backward_no_pipelining

            if args.deepspeed and args.ds_pipeline_enabled:
                # DeepSpeed uses eval_batch() and already aggregates losses.
                assert isinstance(model, list) and len(model) == 1
                loss = model[0].eval_batch(data_iterator, bcast_loss=False,
                                           eval_micro_batches=num_eval_microbatches)
                loss_dicts = [{'lm loss': loss}] * num_eval_microbatches
            else:
                assert args.micro_batch_size == args.eval_micro_batch_size, \
                    "evaluate (training) - Megatron's forward_backward_func options - " \
                    "Unsupported for split micro batch size"
                loss_dicts = forward_backward_func(
                    forward_step_func, data_iterator, model, optimizer=None,
                    timers=None, forward_only=True)

            if mpu.is_pipeline_last_stage(ignore_virtual=True):
                # Reduce across processes.
                for loss_dict in loss_dicts:
                    for key in loss_dict:
                        if 'moe' not in key:
                            total_loss_dict[key] = total_loss_dict.get(
                                key, torch.FloatTensor([0.0])).to(get_current_device()) + loss_dict[key]

            if not args.device_warmup:
                args.consumed_valid_samples += mpu.get_data_parallel_world_size() \
                    * args.eval_micro_batch_size \
                    * num_eval_microbatches

    # Move model back to the train mode.
    for model_module in model:
        model_module.train()
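
    # Average the accumulated sums: every iteration except possibly the last
    # contributed get_num_eval_microbatches() microbatches, while the final
    # iteration may have run only the `num_eval_microbatches` leftovers
    # computed inside the loop.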
    for key in total_loss_dict:
        total_loss_dict[key] /= ((total_iterations - 1) * get_num_eval_microbatches()) + num_eval_microbatches

    if args.curriculum_learning and not args.no_pipeline_parallel:
        # Roll back to actual curriculum seqlen at the end of eval.
        args.curriculum_seqlen = args.curriculum_scheduler.update_difficulty(
            args.iteration + 1)
        if args.curriculum_seqlen < args.seq_length:
            model[0].reset_activation_shape()
    if args.eval_micro_batch_size != args.micro_batch_size:
        reset_cached_broadcast_sizes()
        model[0].reset_activation_shape()

    return total_loss_dict


def warmup(args, forward_step_func, warmup_train_iterator, warmup_valid_iterator, model, optimizer, lr_scheduler,
           teacher_model, prefix, iteration, mllogger):
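    # Device warmup runs throwaway train/eval steps on a warmup dataset
    # before MLPerf RUN_START so that one-time startup costs (graph
    # compilation on HPU and the like; our reading of the flag, not a
    # documented contract) are not charged to the measured run. FP8 module
    # state is snapshotted first and restored afterwards so the throwaway
    # steps do not perturb scaling statistics, and the real checkpoint is
    # (re)loaded at the end.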
# save fp8 modules state: | |
import copy | |
saved_fp8_modules = [] | |
if args.use_hpu_fp8_transformer_engine: | |
base_model = model[0].module | |
for layer in base_model.modules(): | |
if isinstance(layer, ColumnParallelLinear) or isinstance(layer, RowParallelLinear): | |
copied_fp8_module = copy.deepcopy(layer.output_parallel_linear) | |
saved_fp8_modules.append(copied_fp8_module) | |
for _ in range(args.device_warmup_iterations): | |
train_step(forward_step_func, | |
warmup_train_iterator, | |
model, | |
optimizer, | |
lr_scheduler, | |
teacher_model=teacher_model) | |
evaluate_and_print_results(prefix, forward_step_func, | |
warmup_valid_iterator, model, | |
iteration, False, mllogger=mllogger) | |
htcore.mark_step() | |
# Restore the fp8 module states saved before warmup | |
if args.use_hpu_fp8_transformer_engine: | |
base_model = model[0].module | |
for layer in base_model.modules(): | |
if isinstance(layer, ColumnParallelLinear) or isinstance(layer, RowParallelLinear): | |
layer.output_parallel_linear = saved_fp8_modules.pop(0) | |
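# Reset scheduler and engine step counters so the warmup iterations do not count toward training progress. | |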
lr_scheduler.num_steps = 0 | |
model[0].global_steps = 0 | |
model[0].global_samples = 0 | |
htcore.mark_step() | |
if args.load: | |
print('Loading model weights after warmup') | |
torch.distributed.barrier() | |
args.iteration = load_checkpoint(model, optimizer, lr_scheduler) | |
torch.distributed.barrier() | |
if teacher_model is not None: | |
print('Loading teacher model weights after warmup') | |
torch.distributed.barrier() | |
load_checkpoint(teacher_model, strict=False) | |
torch.distributed.barrier() | |
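# For universal checkpoints, sync_hp_to_lp presumably pushes the reloaded high-precision master params back into the low-precision params. | |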
if args.deepspeed and args.universal_checkpoint: | |
sync_hp_to_lp(optimizer) | |
args.device_warmup = False | |
def evaluate_and_print_results(prefix, forward_step_func, | |
data_iterator, model, | |
iteration, verbose=False, mllogger=None): | |
"""Helper function to evaluate and dump results on screen.""" | |
print_rank_last(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Start last rank evaluation") | |
args = get_args() | |
# Fetch the TensorBoard writer unconditionally so it is defined for the logging below even when mllogger is None. | |
writer = get_tensorboard_writer() | |
if not args.device_warmup and mllogger is not None: | |
mllogger.start(key=mllogger.constants.EVAL_START, | |
metadata={'epoch_num': (args.consumed_train_samples - args.ext_lr_steps) * args.seq_length}, | |
sync=False) | |
total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose) | |
string = ' validation loss at {} | '.format(prefix) | |
eval_loss = 0 | |
if args.device_warmup: | |
return eval_loss | |
for key in total_loss_dict: | |
eval_loss = total_loss_dict[key].item() | |
string += '{} value: {:.6E} | '.format(key, eval_loss) | |
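# Report perplexity as exp(loss); the exponent is clamped at 20 to avoid overflow for large losses. | |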
ppl = math.exp(min(20, eval_loss)) | |
string += '{} PPL: {:.6E} | '.format(key, ppl) | |
if writer and is_last_rank(): | |
writer.add_scalar(f'lm-loss-validation/{key} validation', | |
eval_loss, | |
iteration) | |
writer.add_scalar(f"lm loss validation", | |
eval_loss, | |
iteration) | |
writer.add_scalar(f'lm-loss-validation/{key} validation vs samples', | |
eval_loss, | |
args.consumed_train_samples) | |
writer.add_scalar(f'lm-loss-validation/{key} validation vs tokens', | |
eval_loss, | |
args.consumed_train_tokens) | |
if args.log_validation_ppl_to_tensorboard: | |
writer.add_scalar(f'lm-loss-validation/{key} validation ppl', ppl, | |
iteration) | |
writer.add_scalar(f'lm-loss-validation/{key} validation ppl vs samples', | |
ppl, args.consumed_train_samples) | |
writer.add_scalar(f'lm-loss-validation/{key} validation ppl vs tokens', | |
ppl, args.consumed_train_tokens) | |
string = f" {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} |{string}" | |
length = len(string) + 1 | |
print_rank_last('-' * length) | |
print_rank_last(string) | |
print_rank_last('-' * length) | |
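# Max-reduce so ranks that did not compute the loss (local eval_loss of 0) pick up the value from the pipeline last stage. | |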
eval_loss_tensor = torch.FloatTensor([eval_loss]).to(get_current_device()) | |
torch.distributed.all_reduce(eval_loss_tensor, op=torch.distributed.ReduceOp.MAX) | |
eval_loss = eval_loss_tensor.item() | |
if not args.device_warmup and mllogger is not None: | |
mllogger.event(mllogger.constants.EVAL_ACCURACY, value=eval_loss, | |
metadata=dict(epoch_num=(args.consumed_train_samples - args.ext_lr_steps) * args.seq_length), | |
sync=False) | |
mllogger.end(key=mllogger.constants.EVAL_STOP, | |
metadata=dict(epoch_num=(args.consumed_train_samples - args.ext_lr_steps) * args.seq_length), | |
sync=False) | |
return eval_loss | |
def cyclic_iter(iterable): | |
while True: | |
for x in iterable: | |
yield x | |
def build_warmup_iterators(): | |
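"""Build cyclic train/valid data iterators over the warmup dataset (one global batch worth of samples).""" | |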
args = get_args() | |
warmup_samples = args.global_batch_size | |
warmup_num_samples = [warmup_samples, 0, 0] | |
warmup_train_data_ratio = 1.0 | |
warmup_ds = build_train_valid_test_datasets( | |
data_prefix=None, | |
train_data_prefix=[warmup_train_data_ratio, args.warmup_dataset_path], | |
valid_data_prefix=None, | |
test_data_prefix=None, | |
data_impl=args.data_impl, | |
splits_string='100, 0, 0', | |
train_valid_test_num_samples=warmup_num_samples, | |
seq_length=args.seq_length, | |
seed=args.seed, | |
skip_warmup=(not args.mmap_warmup), | |
use_seq_len_plus_one_tokens=args.use_seq_len_plus_one_tokens)[0] | |
warmup_train_dataloader = build_pretraining_data_loader( | |
warmup_ds, 0, True) | |
warmup_valid_dataloader = build_pretraining_data_loader( | |
warmup_ds, 0, False) | |
args.eval_total_samples = warmup_samples | |
return iter(cyclic_iter(warmup_train_dataloader)), iter(cyclic_iter(warmup_valid_dataloader)) | |
def build_train_valid_test_data_iterators( | |
build_train_valid_test_datasets_provider): | |
"""XXX""" | |
args = get_args() | |
(train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) | |
print_rank_0('> building train, validation, and test datasets ...') | |
# Backward compatibility, assume fixed batch size. | |
if args.iteration > 0 and args.consumed_train_samples == 0: | |
assert args.train_samples is None, \ | |
'only backward compatibility support for iteration-based training' | |
args.consumed_train_samples = args.iteration * args.global_batch_size | |
if args.iteration > 0 and args.consumed_valid_samples == 0: | |
if args.train_samples is None: | |
args.consumed_valid_samples = (args.iteration // args.eval_interval) * \ | |
args.eval_iters * args.global_batch_size | |
# Build data loaders only on rank 0 of each tensor model parallel group. | |
if mpu.get_tensor_model_parallel_rank() == 0: | |
# Number of train/valid/test samples. | |
if args.train_samples: | |
train_samples = args.train_samples | |
else: | |
train_samples = args.train_iters * args.global_batch_size | |
eval_iters = (args.train_iters // args.eval_interval + 1) * \ | |
args.eval_iters | |
test_iters = args.eval_iters | |
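# --eval-iters -1 means evaluating on the entire validation set; the actual sample counts are resolved later from the built dataset. | |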
if args.eval_iters == -1: | |
print_rank_0("Evaluation iterations are set to -1") | |
train_val_test_num_samples = [train_samples, -1, -1] | |
else: | |
train_val_test_num_samples = [train_samples, | |
eval_iters * args.global_batch_size, | |
test_iters * args.global_batch_size] | |
print_rank_0(' > datasets target sizes (minimum size):') | |
print_rank_0(' train: {}'.format(train_val_test_num_samples[0])) | |
print_rank_0(' validation: {}'.format(train_val_test_num_samples[1])) | |
print_rank_0(' test: {}'.format(train_val_test_num_samples[2])) | |
# Build the datasets. | |
train_ds, valid_ds, test_ds = build_train_valid_test_datasets_provider( | |
train_val_test_num_samples) | |
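# When the full validation set is evaluated, record its size and restart validation consumption from zero. | |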
if args.eval_iters == -1: | |
eval_total_samples = len(valid_ds) | |
consumed_valid_samples = 0 | |
use_all_eval_samples = True | |
else: | |
eval_total_samples = 0 | |
consumed_valid_samples = args.consumed_valid_samples | |
use_all_eval_samples = False | |
# Build dataloaders. | |
train_dataloader = build_pretraining_data_loader( | |
train_ds, args.consumed_train_samples, True) | |
valid_dataloader = build_pretraining_data_loader( | |
valid_ds, consumed_valid_samples, False, use_all_eval_samples) | |
test_dataloader = build_pretraining_data_loader(test_ds, 0, False) | |
# Flags to know if we need to do training/validation/testing. | |
do_train = train_dataloader is not None and args.train_iters > 0 \ | |
and not args.skip_train | |
do_valid = valid_dataloader is not None and (args.eval_iters > 0 or args.eval_iters == -1) | |
do_test = test_dataloader is not None and args.eval_iters > 0 | |
# Pack the do_train/do_valid/do_test flags and the total eval sample count for broadcast. | |
flags = torch.IntTensor( | |
[int(do_train), int(do_valid), int(do_test), int(eval_total_samples)]).to(get_current_device()) | |
else: | |
flags = torch.IntTensor([0, 0, 0, 0]).to(get_current_device()) | |
# Broadcast the flags from the tensor-model-parallel source rank. | |
torch.distributed.broadcast(flags, | |
mpu.get_tensor_model_parallel_src_rank(), | |
group=mpu.get_tensor_model_parallel_group()) | |
args.do_train = flags[0].item() | |
args.do_valid = flags[1].item() | |
args.do_test = flags[2].item() | |
args.eval_total_samples = flags[3].item() | |
# Build iterators. | |
dl_type = args.dataloader_type | |
assert dl_type in ['single', 'cyclic'] | |
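# 'single' iterates the dataloader once; 'cyclic' restarts it when exhausted (validation always cycles when --eval-iters is -1, see below). | |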
if train_dataloader is not None: | |
train_data_iterator = iter(train_dataloader) if dl_type == 'single' \ | |
else iter(cyclic_iter(train_dataloader)) | |
else: | |
train_data_iterator = None | |
if valid_dataloader is not None: | |
valid_data_iterator = iter(valid_dataloader) if (dl_type == 'single' and args.eval_iters != -1) \ | |
else iter(cyclic_iter(valid_dataloader)) | |
else: | |
valid_data_iterator = None | |
if test_dataloader is not None: | |
test_data_iterator = iter(test_dataloader) if dl_type == 'single' \ | |
else iter(cyclic_iter(test_dataloader)) | |
else: | |
test_data_iterator = None | |
return train_data_iterator, valid_data_iterator, test_data_iterator | |