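#!/bin/bash
# Cron-driven evaluation loop: convert newly produced DeepSpeed ZeRO stage-2
# checkpoints to HuggingFace format, then evaluate them with lm-evaluation-harness.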
source /mnt/weka/peacock/idc/cronscript/venv/bin/activate
#ckpt_dir="/data/output/llama13b_600M/26-04-2024-09:36:12/"
#ckpt_dir="/data/output/llama13b_eh_800M/29-04-2024-14:20:34/"
ckpt_dir="/mnt/weka/peacock/idc/cronscript/ckpts/"
tools_dir="/mnt/weka/peacock/idc/mint/docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed_old/tools"
#sudo chmod -R 777 $ckpt_dir
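# Directory layout under $ckpt_dir:
#   checkpoints_zero_stage_2/ - raw DeepSpeed ZeRO stage-2 checkpoints
#   universal/                - intermediate "universal" checkpoints
#   hf_ckpt/                  - converted HuggingFace checkpoints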
old_ckpt_dir="${ckpt_dir}/checkpoints_zero_stage_2/"
univ_ckpt_dir="${ckpt_dir}/universal/"
hf_ckpt_dir="${ckpt_dir}/hf_ckpt/"
mkdir -p "$old_ckpt_dir" "$univ_ckpt_dir" "$hf_ckpt_dir"
wandb login 2b6ad71e332b0a53ea11b7282d283cc28b7e22a6
accelerate config --config_file /mnt/weka/peacock/idc/cronscript/default_config.yaml
export PYTHONPATH="${PYTHONPATH}:/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness"
#accelerate config --config_file ./default_config.yaml
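# proc_files.txt records checkpoints that have already been processed; each
# run evaluates only the difference between this list and what is on disk.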
old_ckpt_file="${old_ckpt_dir}/proc_files.txt"
if [ ! -f "$old_ckpt_file" ]; then
    touch "$old_ckpt_file"
fi
readarray -t old_files < "$old_ckpt_file"
new_files=($(ls -l "$old_ckpt_dir" | grep ^d | awk '{print $NF}'))
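# NOTE: parsing `ls -l` breaks on directory names containing whitespace; an
# equivalent, more robust listing (a sketch, assuming GNU find) would be:
#   new_files=($(find "$old_ckpt_dir" -mindepth 1 -maxdepth 1 -type d -printf '%f\n'))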
# If checkpoint dirs were deleted, the processed list may reference entries
# that no longer exist; reset it so the difference below stays valid.
if [ ${#old_files[@]} -gt ${#new_files[@]} ]; then
    : > "$old_ckpt_file"
    readarray -t old_files < "$old_ckpt_file"
fi
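# `uniq -u` keeps lines that occur exactly once, i.e. the symmetric difference
# between the processed list and the current directory contents: the new checkpoints.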
new_ckpts=($(echo "${old_files[@]}" "${new_files[@]}" | tr ' ' '\n' | sort | uniq -u))
for ckpt in "${new_ckpts[@]}"; do
echo "evaluation running for ${ckpt}..."
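# Two-step conversion: ZeRO stage-2 shards -> "universal" checkpoint, then
# universal checkpoint -> HuggingFace llama format, via the Megatron-DeepSpeed tools.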
convert_ds_to_univ="python $tools_dir/convert_checkpoint/ds_to_universal.py --input_folder $old_ckpt_dir/$ckpt --output_folder $univ_ckpt_dir/$ckpt"
convert_univ_to_hf="python $tools_dir/convert_checkpoint/universal_to_huggingface.py --universal-dir $univ_ckpt_dir/$ckpt --hf-dir $hf_ckpt_dir/$ckpt --model-type llama --config $ckpt_dir/mds_to_hf_llama_custom.json"
#run_cmd="accelerate launch --main_process_port 39500 -m lm_eval --model hf --model_args pretrained=$hf_ckpt_dir/$ckpt --tasks hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc --batch_size auto >$old_ckpt_dir/$ckpt/debug.out 2>$old_ckpt_dir/$ckpt/debug.err"
run_cmd="accelerate launch --main_process_port 39500 -m lm_eval --model hf --model_args pretrained=$hf_ckpt_dir/$ckpt --tasks indiccopa-hi --batch_size auto --wandb_args project=bharatgpt,group=trial_expt_1"
cd /mnt/weka/peacock/idc/mint/docker/intel_code/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed_old/ || exit 1
eval $convert_ds_to_univ
eval $convert_univ_to_hf
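# Copy the shared tokenizer.model into the converted checkpoint so that
# lm_eval can load the model directory directly.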
cp /mnt/weka/peacock/idc/cronscript/ckpts/hf_ckpt/tokenizer.model "$hf_ckpt_dir/$ckpt/"
cd /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness || exit 1
eval $run_cmd
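# Recording of processed checkpoints is currently disabled; uncomment the
# next line to avoid re-evaluating the same checkpoints on later runs.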
#echo $ckpt >> $old_ckpt_file
done