#!/bin/bash
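# Sweep of DP / TP / PP, micro-batch size (mbs) and gradient accumulation (grad_acc)
# configurations for the picotron benchmark. Each run generates a config via create_config.py
# for TinyLlama_v1.1 (1B), Llama-2-7B or Llama-2-70B at sequence length 4096, and every
# configuration keeps the global batch size at dp * mbs * grad_acc = 1024 sequences.
# Completed sweeps are marked ✅ and left commented out for reference; pending ones are marked ⌛.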
# ✅
# Saturate DP for 1B on 1 node
# for grad_acc in 2 4 8 16 32 64 128; do
# for mbs in 64 32 16 8 4 2 1; do
# if [ $((grad_acc * mbs)) -eq 128 ]; then
# echo "Running with grad_acc=$grad_acc and mbs=$mbs"
# exp_name="dp8_tp1_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# python create_config.py --out_dir result_blog_posts --model_name TinyLlama/TinyLlama_v1.1 --exp_name saturate_dp_1b_1_node/$exp_name --mbs=$mbs --grad_acc=$grad_acc --dp=8 --tp=1 --pp=1 --seq_len=4096 --use_wandb
# fi
# done
# done
# ✅
# Saturate DP for 1B on 8 nodes
# for grad_acc in 1 2 4 8 16; do
# for mbs in 1 2 4 8 16; do
# if [ $((grad_acc * mbs)) -eq 16 ]; then
# exp_name="dp64_tp1_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc and mbs=$mbs"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_8_node/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=64 \
# --tp=1 \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# done
# ✅
# # Saturate DP for 1B on 32 nodes
# for grad_acc in 1 2 4; do
# for mbs in 1 2 4; do
# if [ $((grad_acc * mbs)) -eq 4 ]; then
# exp_name="dp256_tp1_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc and mbs=$mbs"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_32_node/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=256 \
# --tp=1 \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# done
# ✅
# # Saturate DP for 1B on 1 node (with TP)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for tp in 1 2 4 8; do
# if [ $((dp * tp)) -eq 8 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_1_node_tp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# ✅
# # Saturate DP for 1B on 8 nodes (with TP)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4 8 16 32; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for tp in 1 2 4 8; do
# if [ $((dp * tp)) -eq 64 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_8_node_tp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# ✅
# # Saturate DP for 1B on 32 nodes (with TP)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4 8 16 32 64 128; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for tp in 1 2 4 8; do
# if [ $((dp * tp)) -eq 256 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_32_node_tp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 1B on 1 node (with PP 1f1b)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for pp in 1 2 4 8; do
# if [ $((dp * pp)) -eq 8 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=1, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_1_node_pp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 1B on 8 nodes (with PP 1f1b)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4 8 16 32; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for pp in 1 2 4 8 16 32 64; do
# if [ $((dp * pp)) -eq 64 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=1, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_8_node_pp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 1B on 32 nodes (with PP 1f1b)
# for grad_acc in 1 2 4 8 16 32 64 128 256 512; do
# for mbs in 1 2 4 8 16 32 64 128 256 512; do
# for dp in 1 2 4 8 16 32 64 128; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for pp in 1 2 4 8 16 32 64 128 256; do
# if [ $((dp * pp)) -eq 256 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=1, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name TinyLlama/TinyLlama_v1.1 \
# --exp_name saturate_dp_1b_32_node_pp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# ✅
# # Try to fit 7B on 1 node
# for grad_acc in 1 2 4 8 16 32 64 128; do
# for mbs in 1 2 4 8 16 32 64 128; do
# if [ $((grad_acc * mbs)) -eq 128 ]; then
# exp_name="dp8_tp1_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc and mbs=$mbs"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name try_to_fit_7b_1_node/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=8 \
# --tp=1 \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# done
# ✅
# # Saturate DP for 7B on 8 nodes (with TP)
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4 8 16 32 64; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for tp in 1 2 4 8; do
# if [ $((dp * tp)) -eq 64 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_8_node_tp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# ✅
# # Saturate DP for 7B on 8 nodes and TP=16
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# tp=16
# if [ $((dp * tp)) -eq 64 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_8_node_tp16/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# fi
# done
# done
# done
# ✅
# # Saturate DP for 7B on 32 nodes
# for grad_acc in 1 2 4; do
# for mbs in 1 2 4; do
# if [ $((grad_acc * mbs)) -eq 4 ]; then
# exp_name="dp256_tp1_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc and mbs=$mbs"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_32_node/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=256 \
# --tp=1 \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# done
# ✅
# # Saturate DP for 7B on 32 nodes (with TP)
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4 8 16 32 64 128 256; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for tp in 1 2 4 8; do
# if [ $((dp * tp)) -eq 256 ]; then
# exp_name="dp${dp}_tp${tp}_pp1_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, tp=$tp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_32_node_tp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=$tp \
# --pp=1 \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 7B on 8 nodes (with PP)
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4 8 16 32 64; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for pp in 1 2 4 8 16 32 64; do
# if [ $((dp * pp)) -eq 64 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_8_node_pp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 7B on 8 nodes and PP=16
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# pp=16
# if [ $((dp * pp)) -eq 64 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_8_node_pp16/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# fi
# done
# done
# done
# # ✅
# # Saturate DP for 7B on 32 nodes (with PP)
# for grad_acc in 1 2 4 8 16 32 64 128 256; do
# for mbs in 1 2 4 8 16; do
# for dp in 1 2 4 8 16 32 64 128; do
# if [ $((dp * mbs * grad_acc)) -eq 1024 ]; then
# for pp in 1 2 4 8 16 32 64 128 256; do
# if [ $((dp * pp)) -eq 256 ]; then
# exp_name="dp${dp}_tp1_pp${pp}_mbs${mbs}_ga${grad_acc}_sl4096"
# echo "Running with grad_acc=$grad_acc, mbs=$mbs, dp=$dp, pp=$pp"
# python create_config.py \
# --out_dir result_blog_posts \
# --model_name meta-llama/Llama-2-7b-hf \
# --exp_name saturate_dp_7b_32_node_pp/$exp_name \
# --mbs=$mbs \
# --grad_acc=$grad_acc \
# --dp=$dp \
# --tp=1 \
# --pp=$pp \
# --pp_engine 1f1b \
# --seq_len=4096 \
# --use_wandb
# fi
# done
# fi
# done
# done
# done
# ⌛
# 70B on 8 nodes (with DP + TP + PP)
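# mbs is fixed to 1 and dp * grad_acc = 1024, so the global batch stays at 1024 sequences;
# dp * tp * pp must equal 64 ranks, i.e. all GPUs across the 8 nodes (assuming 8 GPUs per node).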
for grad_acc in 1 2 4 8 16 32 64 128 256 512 1024; do
    for dp in 1 2 4 8 16 32 64; do
        if [ $((dp * grad_acc)) -eq 1024 ]; then
            for tp in 2 4 8; do
                for pp in 2 4 8 16 32 64; do
                    if [ $((dp * tp * pp)) -eq 64 ]; then
                        exp_name="dp${dp}_tp${tp}_pp${pp}_mbs1_ga${grad_acc}_sl4096"
                        echo "Running with grad_acc=$grad_acc, mbs=1, dp=$dp, tp=$tp, pp=$pp"
                        python create_config.py \
                            --out_dir result_blog_posts \
                            --model_name meta-llama/Llama-2-70b-hf \
                            --exp_name 70b_8_node_with_dp_tp_pp/$exp_name \
                            --mbs=1 \
                            --grad_acc=$grad_acc \
                            --dp=$dp \
                            --tp=$tp \
                            --pp=$pp \
                            --pp_engine 1f1b \
                            --seq_len=4096 \
                            --use_wandb \
                            --hf_hub_safetensors_path /fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf
                    fi
                done
            done
        fi
    done
done
# ⌛
# 70B on 32 nodes (with DP + TP + PP)
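# Same constraints as above, scaled to 32 nodes: dp * grad_acc = 1024 with mbs=1, and
# dp * tp * pp must equal 256 ranks (again assuming 8 GPUs per node).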
for grad_acc in 1 2 4 8 16 32 64 128 256 512 1024; do
    for dp in 1 2 4 8 16 32 64 128 256; do
        if [ $((dp * grad_acc)) -eq 1024 ]; then
            for tp in 2 4 8; do
                for pp in 2 4 8 16 32 64 128 256; do
                    if [ $((dp * tp * pp)) -eq 256 ]; then
                        exp_name="dp${dp}_tp${tp}_pp${pp}_mbs1_ga${grad_acc}_sl4096"
                        echo "Running with grad_acc=$grad_acc, mbs=1, dp=$dp, tp=$tp, pp=$pp"
                        python create_config.py \
                            --out_dir result_blog_posts \
                            --model_name meta-llama/Llama-2-70b-hf \
                            --exp_name 70b_32_node_with_dp_tp_pp/$exp_name \
                            --mbs=1 \
                            --grad_acc=$grad_acc \
                            --dp=$dp \
                            --tp=$tp \
                            --pp=$pp \
                            --pp_engine 1f1b \
                            --seq_len=4096 \
                            --use_wandb \
                            --hf_hub_safetensors_path /fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf
                    fi
                done
            done
        fi
    done
done
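
# A possible refactor, sketched here for reference only (not used by the sweeps above): every
# sweep repeats the same create_config.py invocation, so it could be wrapped in a small helper.
# The flags mirror the common ones used above; the function name launch_config and its argument
# order are introduced here as an illustration and are not part of the original sweeps.
launch_config() {
    local model=$1 exp_prefix=$2 dp=$3 tp=$4 pp=$5 mbs=$6 ga=$7
    local exp_name="dp${dp}_tp${tp}_pp${pp}_mbs${mbs}_ga${ga}_sl4096"
    echo "Running with grad_acc=$ga, mbs=$mbs, dp=$dp, tp=$tp, pp=$pp"
    python create_config.py \
        --out_dir result_blog_posts \
        --model_name "$model" \
        --exp_name "${exp_prefix}/${exp_name}" \
        --mbs=$mbs \
        --grad_acc=$ga \
        --dp=$dp \
        --tp=$tp \
        --pp=$pp \
        --pp_engine 1f1b \
        --seq_len=4096 \
        --use_wandb
}
# Example call satisfying the 70B 8-node constraints (dp*ga=1024 with mbs=1, dp*tp*pp=64):
# launch_config meta-llama/Llama-2-70b-hf 70b_8_node_with_dp_tp_pp 2 4 8 1 512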