#!/bin/bash
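
# SBATCH directives: example values only, adjust to your cluster and allocation.
# --ntasks-per-node=1 gives one launcher task per node (the srun line at the
# bottom relies on this), and --gres must match GPUS_PER_NODE below.
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:4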

GPUS_PER_NODE=4
NNODES=$SLURM_JOB_NUM_NODES
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))   # total number of GPUs across the job

set -x -e   # print each command as it runs and abort on the first error

# set up the production environment
source $six_ALL_CCFRWORK/start-prod

cd $six_ALL_CCFRWORK/code/megatron-lm

CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2

# the batch script runs on the first node of the allocation, so its hostname is
# the rendezvous address for every rank; the port just has to be free
MASTER_ADDR=$(hostname)
MASTER_PORT=13370

# GPT-2 345M architecture (24 layers, hidden 1024, 16 heads) plus training
# hyper-parameters
GPT_ARGS=" \
    --num-layers 24 \
    --hidden-size 1024 \
    --num-attention-heads 16 \
    --seq-length 1024 \
    --max-position-embeddings 1024 \
    --micro-batch-size 4 \
    --global-batch-size 16 \
    --lr 0.00015 \
    --lr-decay-style cosine \
    --min-lr 1.0e-5 \
    --finetune \
    --train-iters 1000 \
    --lr-decay-iters 800 \
    --lr-warmup-fraction .01 \
    --weight-decay 1e-2 \
    --clip-grad 1.0 \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --fp16 \
    --checkpoint-activations \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval 500 \
    --eval-interval 100 \
    --eval-iters 10 \
    "

# one launcher process per node; torch.distributed.launch then spawns
# $GPUS_PER_NODE training processes on that node
export LAUNCHER="python -u -m torch.distributed.launch \
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT \
    "

export CMD=" \
    $(pwd)/pretrain_gpt.py \
    --tensor-model-parallel-size 2 \
    --pipeline-model-parallel-size 2 \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $SAVE_CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --data-path $DATA_PATH \
    --data-impl mmap \
    --split 949,50,1 \
    --distributed-backend nccl \
    "

# srun starts one task per node; $SLURM_PROCID supplies that task's node rank.
# the single quotes are deliberate: $LAUNCHER and $CMD are exported above and
# expand on the compute node, not at submission time
srun bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
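
# usage: submit from a login node with: sbatch <this-script>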