ferdinand.mom committed
Commit 6e14684 · 1 Parent(s): 03cf31b

add results bench picotron

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/config.json +51 -0
  2. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/job.slurm +94 -0
  3. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_12439713.out +0 -0
  4. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/metrics.csv +2 -0
  5. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt +1 -0
  6. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/config.json +51 -0
  7. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/job.slurm +94 -0
  8. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_12439716.out +0 -0
  9. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/metrics.csv +2 -0
  10. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt +1 -0
  11. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/config.json +51 -0
  12. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/job.slurm +94 -0
  13. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_12439715.out +0 -0
  14. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/metrics.csv +2 -0
  15. result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt +1 -0
  16. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/config.json +51 -0
  17. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/job.slurm +94 -0
  18. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_12368052.out +0 -0
  19. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/metrics.csv +2 -0
  20. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt +1 -0
  21. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/config.json +51 -0
  22. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/job.slurm +94 -0
  23. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_12368049.out +0 -0
  24. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/metrics.csv +2 -0
  25. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt +1 -0
  26. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/config.json +51 -0
  27. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/job.slurm +94 -0
  28. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_12368053.out +0 -0
  29. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/metrics.csv +2 -0
  30. result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt +1 -0
  31. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/config.json +51 -0
  32. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/job.slurm +94 -0
  33. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_12368050.out +0 -0
  34. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/metrics.csv +2 -0
  35. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt +1 -0
  36. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/config.json +51 -0
  37. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/job.slurm +94 -0
  38. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_12385595.out +0 -0
  39. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/metrics.csv +2 -0
  40. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt +1 -0
  41. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/config.json +51 -0
  42. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/job.slurm +94 -0
  43. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_12439723.out +0 -0
  44. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/metrics.csv +2 -0
  45. result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt +1 -0
  46. result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/config.json +51 -0
  47. result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/job.slurm +94 -0
  48. result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_12439711.out +0 -0
  49. result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/metrics.csv +2 -0
  50. result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt +1 -0
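Every run in this sweep uses the same 32-node × 8-GPU allocation, so the directory names encode a fixed budget: dp × tp × pp = 256 GPUs, and gradient accumulation is scaled so that dp × mbs × ga = 1024 micro-batches per optimizer step. A minimal sketch (illustrative only, not part of the commit) that parses the naming convention and checks these invariants:

# Illustrative sketch only (not part of the commit): parse the run-directory
# naming convention dp{D}_tp{T}_pp{P}_mbs{M}_ga{G}_sl{S} and check the sweep
# invariants described above.
import re

def parse_run_name(name):
    m = re.fullmatch(r"dp(\d+)_tp(\d+)_pp(\d+)_mbs(\d+)_ga(\d+)_sl(\d+)", name)
    dp, tp, pp, mbs, ga, sl = (int(g) for g in m.groups())
    return {"dp": dp, "tp": tp, "pp": pp, "mbs": mbs, "ga": ga, "seq_len": sl}

cfg = parse_run_name("dp16_tp2_pp8_mbs1_ga64_sl4096")
assert cfg["dp"] * cfg["tp"] * cfg["pp"] == 256    # 32 nodes x 8 GPUs
assert cfg["dp"] * cfg["mbs"] * cfg["ga"] == 1024  # constant micro-batches per step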
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 2,
+ "cp_size": 1,
+ "pp_size": 8,
+ "dp_size": 16,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 64,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
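For reference, the effective batch size implied by the training block above is dp_size × micro_batch_size × gradient_accumulation_steps = 16 × 1 × 64 = 1024 sequences, i.e. 1024 × 4096 ≈ 4.2M tokens per optimizer step; the other configs in this commit scale gradient_accumulation_steps inversely with dp_size to keep this constant. A quick check (illustrative sketch, not part of the config):

# Illustrative check, using the values from the config above.
dp_size, micro_batch_size, grad_acc, seq_length = 16, 1, 64, 4096
sequences_per_step = dp_size * micro_batch_size * grad_acc      # 1024
tokens_per_step = sequences_per_step * seq_length               # 4_194_304 (~4.2M)
print(sequences_per_step, tokens_per_step)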
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt
+ fi
+ fi
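The tail of the script above is what produces the status.txt values found in this commit: it reduces each run to one of completed, oom, timeout, or fail by checking the srun exit status and grepping the Slurm log (note that illegal-memory-access errors are also bucketed as oom). A minimal Python re-statement of that classification, for illustration only:

# Illustrative re-statement (not part of the commit) of the classification
# done at the end of job.slurm from the srun exit status and the Slurm log.
def classify_run(exit_status, log_text):
    if exit_status == 0:
        return "completed"
    if "OutOfMemoryError" in log_text:
        return "oom"
    if "CUDA error: an illegal memory access" in log_text:
        return "oom"      # the script buckets illegal memory access as "oom" too
    if "Timeout" in log_text:
        return "timeout"
    return "fail"

print(classify_run(1, "torch.cuda.OutOfMemoryError: CUDA out of memory"))  # oom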
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/log_12439713.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp16_tp2_pp8_mbs1_ga64_sl4096,oom,16,2,8,1,64,4096,,
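Each run writes a single-row metrics.csv with this schema; the throughput columns are left empty when the run did not reach a steady state. A small illustrative helper, not part of the commit, that gathers the per-run rows into one summary, assuming the directory layout used here:

# Illustrative helper (not part of the commit): collect the one-row metrics.csv
# files of this sweep into a single summary, assuming the layout shown above.
import csv, glob

rows = []
for path in sorted(glob.glob("result_blog_posts/70b_32_node_with_dp_tp_pp/*/metrics.csv")):
    with open(path, newline="") as f:
        rows.extend(csv.DictReader(f))

for r in rows:
    print(r["run_name"], r["status"], r["avg_tokens_s_gpu"] or "-", r["avg_mfu"] or "-")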
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp2_pp8_mbs1_ga64_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ oom
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 4,
+ "cp_size": 1,
+ "pp_size": 4,
+ "dp_size": 16,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 64,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/log_12439716.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp16_tp4_pp4_mbs1_ga64_sl4096,oom,16,4,4,1,64,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp4_pp4_mbs1_ga64_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ oom
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 8,
+ "cp_size": 1,
+ "pp_size": 2,
+ "dp_size": 16,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 64,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/log_12439715.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp16_tp8_pp2_mbs1_ga64_sl4096,oom,16,8,2,1,64,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp16_tp8_pp2_mbs1_ga64_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ oom
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 2,
+ "cp_size": 1,
+ "pp_size": 128,
+ "dp_size": 1,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 1024,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/log_12368052.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp1_tp2_pp128_mbs1_ga1024_sl4096,timeout,1,2,128,1,1024,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp2_pp128_mbs1_ga1024_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ timeout
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 4,
+ "cp_size": 1,
+ "pp_size": 64,
+ "dp_size": 1,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 1024,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/log_12368049.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp1_tp4_pp64_mbs1_ga1024_sl4096,fail,1,4,64,1,1024,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp4_pp64_mbs1_ga1024_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ fail
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 8,
+ "cp_size": 1,
+ "pp_size": 32,
+ "dp_size": 1,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 1024,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/log_12368053.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp1_tp8_pp32_mbs1_ga1024_sl4096,running,1,8,32,1,1024,4096,127,6
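This is the only run in the sweep with throughput numbers: roughly 127 tokens/s per GPU and an average MFU of about 6 (presumably percent). A rough back-of-the-envelope check, assuming the common 6·N FLOPs-per-token estimate for a 70B-parameter model and an H100 bf16 dense peak of ~989 TFLOP/s (as the hopper-prod partition suggests), lands in the same range:

# Rough illustrative estimate; the 6*N FLOPs/token rule of thumb and the
# ~989 TFLOP/s H100 bf16 dense peak are assumptions, not values from the commit.
n_params = 70e9
tokens_per_s_per_gpu = 127                      # avg_tokens_s_gpu from the row above
achieved = 6 * n_params * tokens_per_s_per_gpu  # ~5.3e13 FLOP/s per GPU
peak = 989e12
print(f"MFU ~ {100 * achieved / peak:.1f}%")    # ~5.4%, in line with the reported ~6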
result_blog_posts/70b_32_node_with_dp_tp_pp/dp1_tp8_pp32_mbs1_ga1024_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ running
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 2,
+ "cp_size": 1,
+ "pp_size": 64,
+ "dp_size": 2,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 512,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update status for pending. It only works for running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Checkout the bench_cluster branch
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/log_12368050.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp2_tp2_pp64_mbs1_ga512_sl4096,oom,2,2,64,1,512,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp2_pp64_mbs1_ga512_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ oom
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 4,
+ "cp_size": 1,
+ "pp_size": 32,
+ "dp_size": 2,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 512,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update the status for pending jobs; it only works once they are running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Move into the repo
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt
+ fi
+ fi
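The update_status helper in the script above only ever writes "running": the loop exits as soon as squeue stops reporting the job, so a run that is cancelled while still PENDING leaves no status behind (the limitation noted in the comment inside update_status). A minimal Python sketch of a watcher that also records the pending state, assuming the same squeue output format; watch_job is a hypothetical helper, not part of picotron:

import subprocess
import time

def watch_job(job_id: str, status_file: str, poll_s: int = 10) -> None:
    # Poll squeue the same way the bash function does and mirror the state to disk.
    while True:
        state = subprocess.run(
            ["squeue", "--job", job_id, "--noheader", "--format=%T"],
            capture_output=True, text=True,
        ).stdout.strip().lower()
        if not state:          # job finished or is no longer known to Slurm
            break
        with open(status_file, "w") as f:
            f.write("running" if state == "running" else "pending")
        if state == "running":
            break
        time.sleep(poll_s)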
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/log_12385595.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp2_tp4_pp32_mbs1_ga512_sl4096,timeout,2,4,32,1,512,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp4_pp32_mbs1_ga512_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ timeout
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 8,
+ "cp_size": 1,
+ "pp_size": 16,
+ "dp_size": 2,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 512,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update the status for pending jobs; it only works once they are running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Move into the repo
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/log_12439723.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp2_tp8_pp16_mbs1_ga512_sl4096,running,2,8,16,1,512,4096,208,9
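The avg_mfu column can be cross-checked from avg_tokens_s_gpu, assuming the common 6 * N FLOPs-per-token approximation for a 70B-parameter decoder and an H100 dense BF16 peak of about 989 TFLOP/s (both are assumptions here, not values read from this repo):

n_params = 70e9
tokens_per_s_per_gpu = 208
peak_bf16_flops = 989e12                      # assumed H100 dense BF16 peak

mfu = 6 * n_params * tokens_per_s_per_gpu / peak_bf16_flops
print(f"{mfu:.1%}")                           # ~8.8%, consistent with the rounded avg_mfu of 9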
result_blog_posts/70b_32_node_with_dp_tp_pp/dp2_tp8_pp16_mbs1_ga512_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ running
result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/config.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "distributed": {
+ "tp_size": 2,
+ "cp_size": 1,
+ "pp_size": 4,
+ "dp_size": 32,
+ "pp_engine": "1f1b",
+ "backend": "nccl",
+ "use_cpu": false
+ },
+ "model": {
+ "name": "meta-llama/Llama-2-70b-hf",
+ "num_hidden_layers": 80,
+ "num_attention_heads": 64,
+ "num_key_value_heads": 8,
+ "dtype": "bfloat16",
+ "use_flash_attention": true,
+ "use_fused_adam": false
+ },
+ "training": {
+ "seed": 42,
+ "learning_rate": 0.0003,
+ "total_train_steps": 200,
+ "seq_length": 4096,
+ "micro_batch_size": 1,
+ "gradient_accumulation_steps": 32,
+ "num_samples": 400000,
+ "max_tokens": null
+ },
+ "dataset": {
+ "name": "roneneldan/TinyStories",
+ "num_workers": 0,
+ "num_proc": 4
+ },
+ "checkpoint": {
+ "save_dir": "result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096",
+ "save_frequency": 300,
+ "load_path": "",
+ "hf_hub_safetensors_path": "/fsx/ferdinandmom/hf_model_ckpt/Llama-2-70b-chat-hf"
+ },
+ "logging": {
+ "use_wandb": true,
+ "project_name": "picotron",
+ "run_name": "70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096"
+ },
+ "environment": {
+ "OMP_NUM_THREADS": "1",
+ "TOKENIZERS_PARALLELISM": "false",
+ "FLASH_ATTEN": "1"
+ }
+ }
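Across the configs in this commit, gradient_accumulation_steps appears to be scaled inversely with dp_size so that every run trains on the same global batch; this is an observation about the config files, not a documented rule:

# dp_size * gradient_accumulation_steps is constant across the sweep.
configs = {
    "dp2_tp4_pp32_mbs1_ga512_sl4096": (2, 512),
    "dp2_tp8_pp16_mbs1_ga512_sl4096": (2, 512),
    "dp32_tp2_pp4_mbs1_ga32_sl4096": (32, 32),
}
for name, (dp, ga) in configs.items():
    print(name, dp * ga)   # 1024 sequences, i.e. ~4.19M tokens/step at seq_length 4096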
result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/job.slurm ADDED
@@ -0,0 +1,94 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=job-picotron
+ #SBATCH --time=00:30:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_%j.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_%j.out
+
+ # Function to update status based on squeue output
+ update_status() {
+ job_id=$1
+ status_file=$2
+ # For unknown reasons, it doesn't update the status for pending jobs; it only works once they are running
+ while true; do
+ job_status=$(squeue --job $job_id --noheader --format=%T)
+ echo "Job status: $job_status"
+ if [ -z "$job_status" ]; then
+ # Job has finished or is not found
+ break
+ elif [ "$job_status" = "RUNNING" ]; then
+ printf "running" > $status_file
+ break
+ fi
+ sleep 10
+ done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /etc/profile.d/modules.sh
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-picotron
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export TORCH_HOME=/fsx/$USER/.cache/torch
+ export HF_HOME=/fsx/$USER/.cache/huggingface
+ export WANDB_DIR=/fsx/$USER/.cache/wandb
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+ export FI_PROVIDER="efa"
+
+ module load cuda/12.1
+
+ GIT_REPO="/fsx/ferdinandmom/ferdinand-hf/picotron/"
+ CMD="$GIT_REPO/train.py --config /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/config.json"
+
+ git checkout loading_big_model
+ # huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ LAUNCHER="torchrun --nproc_per_node=8 --nnode=32 --node_rank=$SLURM_NODEID --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} --rdzv_backend c10d --max_restarts 0 --tee 3"
+
+ # Move into the repo
+ cd $GIT_REPO
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt &
+
+ # Run the main command
+ echo "Running command: $CMD"
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ job_id=$SLURM_JOB_ID
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt
+ else
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_${job_id}.out; then
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt
+ elif grep -q "Timeout" /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_${job_id}.out; then
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt
+ else
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/picotron/result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt
+ fi
+ fi
result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/log_12439711.out ADDED
The diff for this file is too large to render. See raw diff
 
result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/metrics.csv ADDED
@@ -0,0 +1,2 @@
+ run_name,status,dp,tp,pp,micro_batch_size,grad_acc,seq_len,avg_tokens_s_gpu,avg_mfu
+ dp32_tp2_pp4_mbs1_ga32_sl4096,oom,32,2,4,1,32,4096,,
result_blog_posts/70b_32_node_with_dp_tp_pp/dp32_tp2_pp4_mbs1_ga32_sl4096/status.txt ADDED
@@ -0,0 +1 @@
+ oom
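The oom status above is consistent with a back-of-envelope estimate, assuming plain data parallelism replicates optimizer state (the config mentions no ZeRO-style sharding) and the common ~16 bytes/parameter budget for mixed-precision Adam; both numbers are assumptions, not measurements from the run:

params_per_rank = 70e9 / (2 * 4)      # tp=2, pp=4 -> roughly 8.75B parameters per GPU
bytes_per_param = 16                  # bf16 weight + grad, fp32 master weight + two Adam moments (assumed)
print(params_per_rank * bytes_per_param / 1e9, "GB")   # ~140 GB before activations, well over an 80 GB H100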