set -x -e

source $six_ALL_CCFRWORK/start-prod

nvidia-smi

cd $six_ALL_CCFRWORK/code/megatron-lm/

CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node

# rendezvous: the first node of the SLURM allocation acts as the torch.distributed master
MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
MASTER_PORT=6000
NNODES=4
PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here; TP_SIZE*PP_SIZE*DP_SIZE must also match the 16 available GPUs
MICRO_BATCH_SIZE=6
PP_CHUNKS=128 # number of pipeline micro-batches per global batch

MSIZE=18

if   [[ ${MSIZE} == 7 ]];   then NHIDDEN=4096;  NLAYERS=36
elif [[ ${MSIZE} == 14 ]];  then NHIDDEN=6144;  NLAYERS=32
elif [[ ${MSIZE} == 18 ]];  then NHIDDEN=6144;  NLAYERS=40
elif [[ ${MSIZE} == 25 ]];  then NHIDDEN=7168;  NLAYERS=40
elif [[ ${MSIZE} == 30 ]];  then NHIDDEN=7168;  NLAYERS=48
elif [[ ${MSIZE} == 39 ]];  then NHIDDEN=8192;  NLAYERS=48
elif [[ ${MSIZE} == 52 ]];  then NHIDDEN=8192;  NLAYERS=64
elif [[ ${MSIZE} == 65 ]];  then NHIDDEN=9216;  NLAYERS=64
elif [[ ${MSIZE} == 81 ]];  then NHIDDEN=10240; NLAYERS=64
elif [[ ${MSIZE} == 97 ]];  then NHIDDEN=11264; NLAYERS=64
elif [[ ${MSIZE} == 116 ]]; then NHIDDEN=12288; NLAYERS=64
elif [[ ${MSIZE} == 136 ]]; then NHIDDEN=13312; NLAYERS=64
elif [[ ${MSIZE} == 158 ]]; then NHIDDEN=14336; NLAYERS=64
elif [[ ${MSIZE} == 181 ]]; then NHIDDEN=15360; NLAYERS=64
elif [[ ${MSIZE} == 206 ]]; then NHIDDEN=16384; NLAYERS=64
else echo "invalid MSIZE: $MSIZE"; exit 1
fi
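
# optional sanity guard (not part of the original recipe): Megatron splits the
# transformer layers evenly across pipeline stages, so fail fast here rather
# than at job startup if NLAYERS is not a multiple of PP_SIZE
if [[ $(($NLAYERS % $PP_SIZE)) -ne 0 ]]; then
    echo "NLAYERS=$NLAYERS must be a multiple of PP_SIZE=$PP_SIZE"; exit 1
fi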

GPUS_PER_NODE=4
NHEADS=32
SEQ_LEN=1024
VOCAB_SIZE=50257

DP_SIZE=1  # data-parallel degree
TP_SIZE=4  # tensor-parallel degree (each layer sharded across the 4 GPUs of a node)

GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))  # 6*128*1 = 768 sequences per step
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))  # 16 GPUs total
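
# optional consistency check (an addition, not from the original script): Megatron
# derives the data-parallel degree from WORLD_SIZE/(TP_SIZE*PP_SIZE), and with
# DP_SIZE=1 declared above the product must equal WORLD_SIZE, so abort early on
# a mis-sized allocation
if [[ $(($TP_SIZE*$PP_SIZE*$DP_SIZE)) -ne $WORLD_SIZE ]]; then
    echo "TP*PP*DP = $(($TP_SIZE*$PP_SIZE*$DP_SIZE)) does not match WORLD_SIZE=$WORLD_SIZE"; exit 1
fi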

GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --lr 0.00015 \
    --lr-decay-style cosine \
    --min-lr 1.0e-5 \
    --train-iters 1000 \
    --lr-decay-iters 800 \
    --lr-warmup-fraction .01 \
    --weight-decay 1e-2 \
    --clip-grad 1.0 \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --fp16 \
    --checkpoint-activations \
    "

OUTPUT_ARGS=" \
    --log-interval 1 \
    --save-interval 500 \
    --eval-interval 100 \
    --eval-iters 10 \
    "

export LAUNCHER="python -u -m torch.distributed.launch \
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT \
    "

export CMD=" \
    `pwd`/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $SAVE_CHECKPOINT_PATH \
    --load $SAVE_CHECKPOINT_PATH \
    --data-path $DATA_PATH \
    --data-impl mmap \
    --split 949,50,1 \
    --distributed-backend nccl \
    "

# note: --load points at the same path as --save, so an existing checkpoint would be
# resumed; remove any previous checkpoints so each benchmarking run starts cold
rm -rf $SAVE_CHECKPOINT_PATH

# print the parameter count implied by the chosen geometry: each transformer layer
# holds ~12*h^2 weights plus 13*h biases/layernorm params; embeddings add v*h + s*h
python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
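
# launch topology (assuming the allocation provides one srun task per node):
# srun runs $LAUNCHER once per node; torch.distributed.launch then spawns
# GPUS_PER_NODE worker processes on that node, and --node_rank $SLURM_PROCID
# keeps the global ranks consistent across the NNODES nodes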

clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee meg_gpt2_perf_n4.out