RUN_SH=$(pwd)/sdt.sh
# Set TRAIN_BATCHES=1000 to add the argument "--limit_train_batches 1000" described in
# https://github.com/HabanaAI/Model-References/tree/master/PyTorch/generative_models/stable-diffusion-training#multi-server-training-examples
# A full epoch is run if TRAIN_BATCHES is not set.
MPI_MODEL_ENV_VARS=" -x TRAIN_BATCHES=1000 "
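# For reference, a minimal sketch of how a launcher script such as sdt.sh might
# translate the forwarded TRAIN_BATCHES variable into the training argument
# (an assumption for illustration; the actual sdt.sh in Model-References may differ):
#
#   EXTRA_ARGS=""
#   if [ -n "${TRAIN_BATCHES}" ]; then
#       EXTRA_ARGS="--limit_train_batches ${TRAIN_BATCHES}"
#   fi
#   python main.py ... ${EXTRA_ARGS}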
CMD="mpirun \ | |
--tag-output \ | |
--allow-run-as-root \ | |
--bind-to none \ | |
--report-bindings \ | |
--npernode 1 \ | |
--hostfile hostsfile \ | |
-x MASTER_ADDR=$(head -n 1 hostsfile) | |
-x LD_PRELOAD=${LD_PRELOAD} \ | |
-x MODEL_GARDEN_ROOT $MPI_MODEL_ENV_VARS \ | |
${RUN_SH};" | |
echo $CMD
eval $CMD
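# The hostsfile passed to --hostfile is a plain text file with one host per line;
# the first entry is also used as MASTER_ADDR via "head -n 1 hostsfile".
# Example contents (the IP addresses below are placeholders, not values from this setup):
#
#   10.10.100.101
#   10.10.100.102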