#!/bin/bash
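#
# Prepares timestamped result directories and then hands off to the
# LLaMA-13B Megatron-DeepSpeed run script. Behavior is controlled through
# environment variables (HOME_DIR, WORKER_DIR, SYNAPSE_VERSION, HOSTFILE,
# NUM_NODES, RESULT_DESCRIPTION, DEBUG); defaults are applied below where
# noted.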

# Paths and versions; each may be overridden from the environment.
HOME_DIR="${HOME_DIR:-/launch}"
WORKER_DIR="${WORKER_DIR:-${HOME_DIR}/experiments/hccl_test}"
MODEL_PATH="/hccl_demo"
MPI_ROOT="/opt/amazon/openmpi/"
SYNAPSE_VERSION="${SYNAPSE_VERSION:-1.9.0-738}"
HL_HOSTSFILE="${HOSTFILE:-/data/mint/docker/llama13b/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/scripts/hostsfile}"


# NUM_NODES has no default; fail fast if the caller did not set it
# (the original self-assignment was a no-op and would silently yield
# N_CARDS=0 when NUM_NODES was unset).
NUM_NODES="${NUM_NODES:?NUM_NODES must be set}"
NGPU_PER_NODE=8
N_CARDS=$((NUM_NODES * NGPU_PER_NODE))
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M")


RESULT_DESCRIPTION="${RESULT_DESCRIPTION:-setup}"
RESULTS_DIR="${WORKER_DIR}/results/llama_runs/${SYNAPSE_VERSION}/${RESULT_DESCRIPTION}/${N_CARDS}/${TIMESTAMP}"
MPILOG_DIR="${RESULTS_DIR}/mpi_log"

DEBUG="${DEBUG:-False}"
if [ "$DEBUG" = "True" ]; then
    # Extra mpirun -x flags that export verbose SynapseAI logging
    # settings to every rank.
    DEBUG_CMD="-x LOG_LEVEL_ALL=3 -x ENABLE_CONSOLE=true"
fi
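# Sketch of intended use (an assumption, not confirmed by this script):
# DEBUG_CMD would be spliced into the downstream mpirun invocation so the
# logging variables reach every rank, e.g.:
#   mpirun -n "$N_CARDS" --hostfile "$HL_HOSTSFILE" $DEBUG_CMD <training cmd>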


mkdir -p "$RESULTS_DIR" "$MPILOG_DIR"
chmod -R a+rx "$RESULTS_DIR" "$MPILOG_DIR"

# Hand off to the model launcher. Note that HL_HOSTSFILE, DEBUG_CMD, and the
# result paths above are not exported, so the child script can only use them
# if it re-derives them or they are exported before this point.
/Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/scripts/run_llama13b.sh
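
# Example invocation (hypothetical values; the wrapper's filename is assumed):
#   NUM_NODES=2 RESULT_DESCRIPTION=baseline DEBUG=True ./run_hccl_test.sh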