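# derive the distributed topology from the SLURM allocation:
# GPUs per node is hardcoded here, the node count comes from the job,
# and WORLD_SIZE is the total number of processes across the whole job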
GPUS_PER_NODE=4
NNODES=$SLURM_JOB_NUM_NODES
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))

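# echo commands as they run and abort on the first error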
set -x -e

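# activate the shared production environment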
source $six_ALL_CCFRWORK/start-prod

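# run from the transformers clone and make its source tree importable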
cd $six_ALL_CCFRWORK/code/transformers
export PYTHONPATH=$six_ALL_CCFRWORK/code/transformers/src

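# rendezvous point: the first node of the allocation acts as the master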
MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
MASTER_PORT=13370

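# one launcher invocation per node; --node_rank is appended at srun time
# (on PyTorch >= 1.10 `torchrun` can be used in place of torch.distributed.launch)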
export LAUNCHER=" \
    python -u -m torch.distributed.launch \
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $NNODES \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT \
    "

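# model checkpoint and dataset to train on (deliberately small for a quick run)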
MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
DATASET="stas/openwebtext-10k"

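# the training command proper: run_clm.py with DeepSpeed ZeRO stage 2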
export CMD=" \
    `pwd`/examples/pytorch/language-modeling/run_clm.py \
    --model_name_or_path $MODEL \
    --dataset_name $DATASET \
    --output_dir output_dir \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --max_train_samples 1000 \
    --max_eval_samples 200 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --num_train_epochs 1 \
    --warmup_steps 8 \
    --block_size 64 \
    --fp16 \
    --report_to none \
    --deepspeed tests/deepspeed/ds_config_zero2.json \
    "

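# keep all HF caches on the shared WORK partition and run in offline mode:
# compute nodes typically have no internet access, so everything must be pre-downloaded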
export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1

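# start one task per node; each task runs the launcher with its own node rank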
srun bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'