applied-ai-018 committed
Commit 370453e (verified)
Parent: 2024260

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50):
  1. .github/ISSUE_TEMPLATE.md +15 -0
  2. experiments/bandwidth/all_reduce_bench-32gb-n4.slurm +20 -0
  3. experiments/bandwidth/all_reduce_bench-32gb-n4.txt +850 -0
  4. experiments/bandwidth/all_reduce_bench-a100-n4.slurm +24 -0
  5. experiments/bandwidth/all_reduce_bench-a100-n4.txt +1424 -0
  6. experiments/bandwidth/all_reduce_bench.py +66 -0
  7. experiments/bandwidth/n16_32gb_all_reduce_bench.txt +0 -0
  8. experiments/bandwidth/n1_16gb_all_reduce_bench.txt +264 -0
  9. experiments/bandwidth/n1_32gb_all_reduce_bench.txt +264 -0
  10. experiments/gpt2-16gb-nodes.md +1591 -0
  11. experiments/gpt2-meg-ds-3d-old/meg_ds_3d_gpt2_perf_n16.out +0 -0
  12. experiments/gpt2.md +592 -0
  13. experiments/lm-harness-evaluation.md +29 -0
  14. experiments/performance.md +7 -0
  15. experiments/tr8-104B.md +103 -0
  16. finetune/README.md +26 -0
  17. jz/archs/enwiki.md +415 -0
  18. jz/archs/gpt2.md +863 -0
  19. jz/archs/t5.md +172 -0
  20. jz/envs/README.md +662 -0
  21. jz/envs/apex/build.sh +4 -0
  22. jz/envs/deepspeed/build.sh +7 -0
  23. math/README.md +132 -0
  24. train/README.md +38 -0
  25. train/memory.md +7 -0
  26. train/sanity-checks.md +59 -0
  27. train/tr11-176B-ml/backup-schedule.md +142 -0
  28. train/tr11-176B-ml/chronicles-prequel.md +1394 -0
  29. train/tr11-176B-ml/finetune.md +15 -0
  30. train/tr11-176B-ml/smaller_models/tr11b-1B3-ml.slurm +205 -0
  31. train/tr11-176B-ml/smaller_models/tr11c-2B5-ml-continuation.slurm +204 -0
  32. train/tr11-176B-ml/smaller_models/tr11c-2B5-ml.slurm +205 -0
  33. train/tr11-176B-ml/smaller_models/tr11d-760M-ml-continuation.slurm +205 -0
  34. train/tr11-176B-ml/smaller_models/tr11d-760M-ml.slurm +205 -0
  35. train/tr11-176B-ml/smaller_models/tr11e-350M-ml-continuation.slurm +205 -0
  36. train/tr11-176B-ml/smaller_models/tr11e-350M-ml.slurm +205 -0
  37. train/tr11-176B-ml/smaller_models/tr11f-6B3-ml-continuation.slurm +204 -0
  38. train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm +206 -0
  39. train/tr11-176B-ml/tr11-176B-ml.slurm +221 -0
  40. train/tr3-1B3-baseline/README.md +27 -0
  41. train/tr3-1B3-baseline/tar_experiments.slurm +16 -0
  42. train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-logs.slurm +21 -0
  43. train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm +22 -0
  44. train/tr3-1B3-baseline/tr3-1B3-modeling-baseline.slurm +182 -0
  45. train/tr3-1B3-baseline/tr3b-760M.slurm +180 -0
  46. train/tr3-1B3-baseline/tr3c-350M.slurm +180 -0
  47. train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm +178 -0
  48. train/tr3-1B3-baseline/tr3d-1B3-oscar-training2.slurm +184 -0
  49. train/tr3-1B3-baseline/tr3e-1B3-c4-training2.slurm +184 -0
  50. train/tr3-1B3-baseline/tr3e-1B3-diagnostic1-warmup-c4.slurm +176 -0
.github/ISSUE_TEMPLATE.md ADDED
@@ -0,0 +1,15 @@
+ * Big Science version:
+ * Python version:
+ * Operating System:
+
+ ### Description
+
+ Describe what you were trying to get done.
+ Tell us what happened, what went wrong, and what you expected to happen.
+
+ ### What I Did
+
+ ```
+ Paste the command(s) you ran and the output.
+ If there was a crash, please include the traceback here.
+ ```
experiments/bandwidth/all_reduce_bench-32gb-n4.slurm ADDED
@@ -0,0 +1,20 @@
+ #!/bin/bash
+ #SBATCH --job-name=all_reduce_bench-32gb-n4
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:4 # number of gpus
+ #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@gpu
+
+ export LOG_FILE=all_reduce_bench-32gb-n4.txt
+ export NNODES=4
+ export GPUS_PER_NODE=4
+ export NCCL_DEBUG=info
+
+ export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+
+ srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.launch --nnodes $NNODES --nproc_per_node $GPUS_PER_NODE --node_rank $SLURM_PROCID --master_addr $MASTER_ADDR --master_port 12345 all_reduce_bench.py' 2>&1 | tee $LOG_FILE
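
The launcher above runs experiments/bandwidth/all_reduce_bench.py, which this commit also adds (+66 lines) but which is not rendered in this truncated 50-file view. For orientation, here is a minimal sketch of such an all-reduce bandwidth benchmark, written to be consistent with the logs that follow (a ~4.0 GB fp32 payload per rank, the "ignore me" sync prints, and per-rank algo-throughput/busbw reports); the constants, formulas and structure here are assumptions, not the committed code.

```python
# Illustrative sketch only -- the committed all_reduce_bench.py may differ.
import argparse
import time

import torch
import torch.distributed as dist

TRIALS = 5
N = 500000
M = 2000   # 500000 * 2000 fp32 elements ~= 4.0 GB per rank, matching the logs below


def timed_allreduce(mat):
    start = time.perf_counter()
    dist.all_reduce(mat)
    print(f"ignore me {int(mat[0][0])}")     # reading a value forces a sync before stopping the clock
    duration = time.perf_counter() - start

    size = M * N * 4                         # bytes per rank
    n = dist.get_world_size()
    algbw = 8 * 2 * size / duration          # bits/s, counting data sent + received
    busbw = 8 * size / duration * (2 * (n - 1) / n)   # NCCL-style bus bandwidth

    print(f"{dist.get_rank()}:\n"
          f" duration: {duration:.4f} sec\n"
          f" algo throughput: {algbw:.4f} bps, {algbw / 1e9:.4f} Gbps\n"
          f" busbw: {busbw / 1e9:.4f} Gbps")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int)    # passed by torch.distributed.launch
    args = parser.parse_args()
    print("local_rank:", args.local_rank)

    torch.cuda.set_device(args.local_rank)
    dist.init_process_group("nccl")

    mat = torch.rand(N, M, dtype=torch.float32, device="cuda")
    print(f"{dist.get_rank()} data size: {mat.numel() * mat.element_size() / 1e9} GB")
    for _ in range(TRIALS):
        timed_allreduce(mat)
```

Note that in the logs the reported busbw is (n-1)/n of the algo throughput (15/16 with 16 ranks), e.g. 90.40 Gbps algo throughput pairs with 84.75 Gbps busbw in the warmed-up trials.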
experiments/bandwidth/all_reduce_bench-32gb-n4.txt ADDED
@@ -0,0 +1,850 @@
1
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
2
+ and will be removed in future. Use torchrun.
3
+ Note that --use_env is set by default in torchrun.
4
+ If your script expects `--local_rank` argument to be set, please
5
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
6
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
7
+ further instructions
8
+
9
+ warnings.warn(
10
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
11
+ and will be removed in future. Use torchrun.
12
+ Note that --use_env is set by default in torchrun.
13
+ If your script expects `--local_rank` argument to be set, please
14
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
15
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
16
+ further instructions
17
+
18
+ warnings.warn(
19
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
20
+ and will be removed in future. Use torchrun.
21
+ Note that --use_env is set by default in torchrun.
22
+ If your script expects `--local_rank` argument to be set, please
23
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
24
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
25
+ further instructions
26
+
27
+ warnings.warn(
28
+ WARNING:torch.distributed.run:
29
+ *****************************************
30
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
31
+ *****************************************
32
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
33
+ and will be removed in future. Use torchrun.
34
+ Note that --use_env is set by default in torchrun.
35
+ If your script expects `--local_rank` argument to be set, please
36
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
37
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
38
+ further instructions
39
+
40
+ warnings.warn(
41
+ WARNING:torch.distributed.run:
42
+ *****************************************
43
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
44
+ *****************************************
45
+ WARNING:torch.distributed.run:
46
+ *****************************************
47
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
48
+ *****************************************
49
+ WARNING:torch.distributed.run:
50
+ *****************************************
51
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
52
+ *****************************************
53
+ local_rank: 0
54
+ local_rank: 0
55
+ local_rank: 2
56
+ local_rank: 1
57
+ local_rank: 1
58
+ local_rank: 3
59
+ local_rank: 3
60
+ local_rank: 2
61
+ local_rank: 1
62
+ local_rank: 3
63
+ local_rank: 2
64
+ local_rank: 0
65
+ local_rank: 0
66
+ local_rank: 1
67
+ local_rank: 3
68
+ local_rank: 2
69
+ 0 data size: 4.0 GB
70
+ 4 data size: 4.0 GB
71
+ 6 data size: 4.0 GB
72
+ 11 data size: 4.0 GB
73
+ 3 data size: 4.0 GB
74
+ 10 data size: 4.0 GB
75
+ 7 data size: 4.0 GB
76
+ 5 data size: 4.0 GB
77
+ 1 data size: 4.0 GB
78
+ 2 data size: 4.0 GB
79
+ 8 data size: 4.0 GB
80
+ 15 data size: 4.0 GB
81
+ 13 data size: 4.0 GB
82
+ 12 data size: 4.0 GB
83
+ 14 data size: 4.0 GB
84
+ 9 data size: 4.0 GB
85
+ r6i6n4:257714:257714 [0] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0>
86
+ r6i6n4:257714:257714 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
87
+ r6i6n4:257714:257714 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0>
88
+ r6i6n4:257714:257714 [0] NCCL INFO Using network IB
89
+ NCCL version 2.10.3+cuda11.3
90
+ r6i6n4:257715:257715 [1] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0>
91
+ r6i6n5:378203:378203 [3] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0>
92
+ r6i6n5:378202:378202 [2] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0>
93
+ r6i6n5:378201:378201 [1] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0>
94
+ r6i6n4:257715:257715 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
95
+ r6i6n5:378202:378202 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
96
+ r6i6n5:378203:378203 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
97
+ r6i6n5:378201:378201 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
98
+ r6i6n4:257715:257715 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0>
99
+ r6i6n4:257715:257715 [1] NCCL INFO Using network IB
100
+ r6i6n5:378203:378203 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0>
101
+ r6i6n5:378202:378202 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0>
102
+ r6i6n5:378203:378203 [3] NCCL INFO Using network IB
103
+ r6i6n5:378202:378202 [2] NCCL INFO Using network IB
104
+ r6i6n5:378201:378201 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0>
105
+ r6i6n5:378201:378201 [1] NCCL INFO Using network IB
106
+ r6i6n4:257717:257717 [3] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0>
107
+ r6i6n4:257717:257717 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
108
+ r6i6n4:257717:257717 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0>
109
+ r6i6n4:257717:257717 [3] NCCL INFO Using network IB
110
+ r6i6n4:257716:257716 [2] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0>
111
+ r6i6n4:257716:257716 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
112
+ r6i6n4:257716:257716 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0>
113
+ r6i6n4:257716:257716 [2] NCCL INFO Using network IB
114
+ r6i6n5:378200:378200 [0] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0>
115
+ r6i6n5:378200:378200 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
116
+ r6i6n5:378200:378200 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0>
117
+ r6i6n5:378200:378200 [0] NCCL INFO Using network IB
118
+ r7i6n2:1370349:1370349 [2] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0>
119
+ r7i6n2:1370348:1370348 [1] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0>
120
+ r7i6n2:1370348:1370348 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
121
+ r7i6n2:1370349:1370349 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
122
+ r7i6n2:1370349:1370349 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0>
123
+ r7i6n2:1370349:1370349 [2] NCCL INFO Using network IB
124
+ r7i6n2:1370348:1370348 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0>
125
+ r7i6n2:1370348:1370348 [1] NCCL INFO Using network IB
126
+ r7i6n3:610063:610063 [0] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0>
127
+ r7i6n3:610066:610066 [3] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0>
128
+ r7i6n3:610063:610063 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
129
+ r7i6n3:610066:610066 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
130
+ r7i6n3:610066:610066 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0>
131
+ r7i6n3:610066:610066 [3] NCCL INFO Using network IB
132
+ r7i6n3:610063:610063 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0>
133
+ r7i6n3:610063:610063 [0] NCCL INFO Using network IB
134
+ r7i6n2:1370347:1370347 [0] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0>
135
+ r7i6n2:1370347:1370347 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
136
+ r7i6n2:1370347:1370347 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0>
137
+ r7i6n2:1370347:1370347 [0] NCCL INFO Using network IB
138
+ r7i6n3:610065:610065 [2] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0>
139
+ r7i6n3:610065:610065 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
140
+ r7i6n3:610064:610064 [1] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0>
141
+ r7i6n3:610065:610065 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0>
142
+ r7i6n3:610065:610065 [2] NCCL INFO Using network IB
143
+ r7i6n3:610064:610064 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
144
+ r7i6n3:610064:610064 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0>
145
+ r7i6n3:610064:610064 [1] NCCL INFO Using network IB
146
+ r7i6n2:1370350:1370350 [3] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0>
147
+ r7i6n2:1370350:1370350 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
148
+ r7i6n2:1370350:1370350 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0>
149
+ r7i6n2:1370350:1370350 [3] NCCL INFO Using network IB
150
+ r7i6n3:610064:610133 [1] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] -1/-1/-1->13->12 [2] 14/-1/-1->13->12 [3] -1/-1/-1->13->12
151
+ r7i6n3:610065:610128 [2] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->10 [2] 15/-1/-1->14->13 [3] 15/6/-1->14->-1
152
+ r7i6n3:610066:610123 [3] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] 12/-1/-1->15->14 [2] -1/-1/-1->15->14 [3] 12/-1/-1->15->14
153
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 00/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
154
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 01/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13
155
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 02/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
156
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 03/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13
157
+ r6i6n4:257714:257762 [0] NCCL INFO Trees [0] 1/8/-1->0->-1 [1] 1/-1/-1->0->3 [2] 1/-1/-1->0->4 [3] 1/-1/-1->0->3
158
+ r6i6n4:257716:257777 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/10/-1->2->-1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->6
159
+ r6i6n4:257715:257767 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] -1/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] -1/-1/-1->1->0
160
+ r7i6n3:610063:610122 [0] NCCL INFO Trees [0] 13/-1/-1->12->8 [1] 13/-1/-1->12->15 [2] 13/4/-1->12->-1 [3] 13/-1/-1->12->15
161
+ r6i6n4:257714:257762 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff
162
+ r6i6n4:257715:257767 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff
163
+ r6i6n4:257717:257772 [3] NCCL INFO Trees [0] -1/-1/-1->3->2 [1] 0/-1/-1->3->2 [2] -1/-1/-1->3->2 [3] 0/-1/-1->3->2
164
+ r6i6n4:257716:257777 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000
165
+ r7i6n2:1370350:1370452 [3] NCCL INFO Trees [0] -1/-1/-1->11->10 [1] 8/6/-1->11->10 [2] -1/-1/-1->11->10 [3] 8/-1/-1->11->10
166
+ r7i6n2:1370349:1370433 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/14/-1->10->2 [2] 11/-1/-1->10->9 [3] 11/-1/-1->10->7
167
+ r6i6n4:257717:257772 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000
168
+ r7i6n2:1370347:1370447 [0] NCCL INFO Trees [0] 9/12/-1->8->0 [1] 9/-1/-1->8->11 [2] 9/-1/-1->8->5 [3] 9/-1/-1->8->11
169
+ r7i6n2:1370348:1370434 [1] NCCL INFO Trees [0] 10/4/-1->9->8 [1] -1/-1/-1->9->8 [2] 10/-1/-1->9->8 [3] -1/-1/-1->9->8
170
+ r6i6n5:378202:378256 [2] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->11 [2] 7/-1/-1->6->5 [3] 7/2/-1->6->14
171
+ r6i6n5:378203:378255 [3] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] 4/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] 4/10/-1->7->6
172
+ r6i6n5:378201:378257 [1] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] -1/-1/-1->5->4 [2] 6/8/-1->5->4 [3] -1/-1/-1->5->4
173
+ r6i6n5:378200:378262 [0] NCCL INFO Trees [0] 5/-1/-1->4->9 [1] 5/-1/-1->4->7 [2] 5/0/-1->4->12 [3] 5/-1/-1->4->7
174
+ r6i6n5:378202:378256 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000
175
+ r6i6n5:378203:378255 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000
176
+ r6i6n5:378201:378257 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff
177
+ r6i6n5:378200:378262 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff
178
+ r7i6n3:610064:610133 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff
179
+ r7i6n3:610065:610128 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000
180
+ r7i6n3:610066:610123 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000
181
+ r7i6n3:610063:610122 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff
182
+ r7i6n2:1370350:1370452 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000
183
+ r7i6n2:1370349:1370433 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000
184
+ r7i6n2:1370347:1370447 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff
185
+ r7i6n2:1370348:1370434 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff
186
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[88000] via P2P/IPC
187
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[88000] via P2P/IPC
188
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 02 : 13[1c000] -> 14[88000] via P2P/IPC
189
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC
190
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 00 : 14[88000] -> 15[8a000] via P2P/IPC
191
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC
192
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 02 : 1[1c000] -> 2[88000] via P2P/IPC
193
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 6[88000] via P2P/IPC
194
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 00 : 6[88000] -> 7[8a000] via P2P/IPC
195
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 02 : 14[88000] -> 15[8a000] via P2P/IPC
196
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC
197
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 02 : 6[88000] -> 7[8a000] via P2P/IPC
198
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 15[8a000] -> 0[1a000] [receive] via NET/IB/1
199
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [receive] via NET/IB/1
200
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [receive] via NET/IB/1
201
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[88000] via P2P/IPC
202
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 00 : 10[88000] -> 11[8a000] via P2P/IPC
203
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 02 : 9[1c000] -> 10[88000] via P2P/IPC
204
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 02 : 10[88000] -> 11[8a000] via P2P/IPC
205
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [send] via NET/IB/3
206
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 00 : 15[8a000] -> 0[1a000] [send] via NET/IB/3
207
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [send] via NET/IB/3
208
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [receive] via NET/IB/1
209
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 15[8a000] -> 0[1a000] [receive] via NET/IB/1
210
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [receive] via NET/IB/1
211
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [receive] via NET/IB/1
212
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [send] via NET/IB/3
213
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [send] via NET/IB/3
214
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 02 : 15[8a000] -> 0[1a000] [send] via NET/IB/3
215
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 1[1c000] via P2P/IPC
216
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC
217
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [send] via NET/IB/3
218
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [receive] via NET/IB/1
219
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [send] via NET/IB/3
220
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 9[1c000] via P2P/IPC
221
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] -> 9[1c000] via P2P/IPC
222
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 5[1c000] via P2P/IPC
223
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 5[1c000] via P2P/IPC
224
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1c000] via P2P/IPC
225
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 12[1a000] -> 13[1c000] via P2P/IPC
226
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 13[1c000] via P2P/IPC
227
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 13[1c000] via P2P/IPC
228
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1c000] via P2P/IPC
229
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 5[1c000] via P2P/IPC
230
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 01 : 13[1c000] -> 0[1a000] [send] via NET/IB/1
231
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC
232
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 0[1a000] -> 1[1c000] via P2P/IPC
233
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [receive] via NET/IB/1
234
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 03 : 13[1c000] -> 0[1a000] [send] via NET/IB/1
235
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1c000] via P2P/IPC
236
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 8[1a000] -> 9[1c000] via P2P/IPC
237
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [receive] via NET/IB/1
238
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 13[1c000] -> 0[1a000] [receive] via NET/IB/1
239
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [send] via NET/IB/1
240
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [receive] via NET/IB/1
241
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 12[1a000] -> 15[8a000] via P2P/IPC
242
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 03 : 12[1a000] -> 15[8a000] via P2P/IPC
243
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[88000] via P2P/IPC
244
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 03 : 15[8a000] -> 14[88000] via P2P/IPC
245
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [receive] via NET/IB/1
246
+ r7i6n3:610066:610123 [3] NCCL INFO Connected all rings
247
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 4[1a000] -> 7[8a000] via P2P/IPC
248
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 4[1a000] -> 7[8a000] via P2P/IPC
249
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1
250
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 13[1c000] -> 0[1a000] [receive] via NET/IB/1
251
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[88000] via P2P/IPC
252
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [send] via NET/IB/1
253
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC
254
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 0[1a000] -> 3[8a000] via P2P/IPC
255
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 6[88000] via P2P/IPC
256
+ r6i6n5:378203:378255 [3] NCCL INFO Connected all rings
257
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC
258
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC
259
+ r6i6n5:378200:378262 [0] NCCL INFO Connected all rings
260
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1c000] via P2P/IPC
261
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 4[1a000] -> 5[1c000] via P2P/IPC
262
+ r6i6n4:257715:257767 [1] NCCL INFO Connected all rings
263
+ r6i6n4:257717:257772 [3] NCCL INFO Connected all rings
264
+ r6i6n4:257716:257777 [2] NCCL INFO Connected all rings
265
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1
266
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 8[1a000] -> 11[8a000] via P2P/IPC
267
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 3[8a000] via P2P/IPC
268
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC
269
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [send] via NET/IB/1
270
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 03 : 8[1a000] -> 11[8a000] via P2P/IPC
271
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC
272
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [send] via NET/IB/1
273
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC
274
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[88000] via P2P/IPC
275
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 03 : 11[8a000] -> 10[88000] via P2P/IPC
276
+ r7i6n2:1370350:1370452 [3] NCCL INFO Connected all rings
277
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [send] via NET/IB/1
278
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 6[88000] [send] via NET/IB/3
279
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [send] via NET/IB/1
280
+ r7i6n2:1370347:1370447 [0] NCCL INFO Connected all rings
281
+ r6i6n4:257714:257762 [0] NCCL INFO Connected all rings
282
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1c000] via P2P/IPC
283
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC
284
+ r6i6n5:378201:378257 [1] NCCL INFO Connected all rings
285
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC
286
+ r7i6n3:610063:610122 [0] NCCL INFO Connected all rings
287
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 03 : 8[1a000] -> 9[1c000] via P2P/IPC
288
+ r6i6n5:378202:378256 [2] NCCL INFO Connected all rings
289
+ r7i6n3:610064:610133 [1] NCCL INFO Connected all rings
290
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 7[8a000] via P2P/IPC
291
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1c000] via P2P/IPC
292
+ r7i6n2:1370348:1370434 [1] NCCL INFO Connected all rings
293
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC
294
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1a000] via P2P/IPC
295
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC
296
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 03 : 12[1a000] -> 13[1c000] via P2P/IPC
297
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 7[8a000] via P2P/IPC
298
+ r7i6n3:610065:610128 [2] NCCL INFO Connected all rings
299
+ r7i6n2:1370349:1370433 [2] NCCL INFO Connected all rings
300
+ r6i6n4:257715:257767 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC
301
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 15[8a000] via P2P/IPC
302
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 11[8a000] via P2P/IPC
303
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 15[8a000] via P2P/IPC
304
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] -> 11[8a000] via P2P/IPC
305
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 01 : 15[8a000] -> 12[1a000] via P2P/IPC
306
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 00 : 13[1c000] -> 12[1a000] via P2P/IPC
307
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 03 : 15[8a000] -> 12[1a000] via P2P/IPC
308
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 01 : 13[1c000] -> 12[1a000] via P2P/IPC
309
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 02 : 13[1c000] -> 12[1a000] via P2P/IPC
310
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 0[1a000] -> 4[1a000] [send] via NET/IB/1
311
+ r7i6n3:610064:610133 [1] NCCL INFO Channel 03 : 13[1c000] -> 12[1a000] via P2P/IPC
312
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 0[1a000] -> 4[1a000] [receive] via NET/IB/1
313
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 8[1a000] [send] via NET/IB/1
314
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 10[88000] [send] via NET/IB/3
315
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/1
316
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 2[88000] -> 6[88000] [receive] via NET/IB/3
317
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1
318
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 4[1a000] -> 9[1c000] [receive] via NET/IB/1
319
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [receive] via NET/IB/3
320
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 6[88000] -> 11[8a000] [receive] via NET/IB/3
321
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 7[8a000] -> 10[88000] [receive] via NET/IB/3
322
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [receive] via NET/IB/1
323
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 4[1a000] -> 9[1c000] [send] via NET/IB/1
324
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 10[88000] -> 2[88000] [receive] via NET/IB/3
325
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 11[8a000] [send] via NET/IB/3
326
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [send] via NET/IB/1
327
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 8[1a000] -> 5[1c000] [receive] via NET/IB/1
328
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 10[88000] -> 7[8a000] [receive] via NET/IB/3
329
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 01 : 7[8a000] -> 4[1a000] via P2P/IPC
330
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [send] via NET/IB/3
331
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 4[1a000] via P2P/IPC
332
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [send] via NET/IB/1
333
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 10[88000] [send] via NET/IB/3
334
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 12[1a000] -> 4[1a000] [receive] via NET/IB/1
335
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 4[1a000] [send] via NET/IB/1
336
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 14[88000] -> 6[88000] [receive] via NET/IB/3
337
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 4[1a000] -> 12[1a000] [receive] via NET/IB/1
338
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [receive] via NET/IB/1
339
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 6[88000] [send] via NET/IB/3
340
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 6[88000] -> 14[88000] [receive] via NET/IB/3
341
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 2[88000] -> 10[88000] [receive] via NET/IB/3
342
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 12[1a000] [send] via NET/IB/1
343
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 14[88000] [send] via NET/IB/3
344
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 12[1a000] -> 4[1a000] [send] via NET/IB/1
345
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [send] via NET/IB/1
346
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 6[88000] [send] via NET/IB/3
347
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 2[88000] [send] via NET/IB/3
348
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 9[1c000] -> 4[1a000] [receive] via NET/IB/1
349
+ r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/1
350
+ r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 4[1a000] -> 0[1a000] [receive] via NET/IB/1
351
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/1
352
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [send] via NET/IB/3
353
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 11[8a000] -> 6[88000] [receive] via NET/IB/3
354
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 8[1a000] via P2P/IPC
355
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 6[88000] -> 2[88000] [receive] via NET/IB/3
356
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 01 : 9[1c000] -> 8[1a000] via P2P/IPC
357
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [receive] via NET/IB/3
358
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 8[1a000] via P2P/IPC
359
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 02 : 9[1c000] -> 8[1a000] via P2P/IPC
360
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 03 : 11[8a000] -> 8[1a000] via P2P/IPC
361
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[88000] via P2P/IPC
362
+ r7i6n2:1370348:1370434 [1] NCCL INFO Channel 03 : 9[1c000] -> 8[1a000] via P2P/IPC
363
+ r7i6n3:610066:610123 [3] NCCL INFO Channel 02 : 15[8a000] -> 14[88000] via P2P/IPC
364
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 00 : 14[88000] -> 13[1c000] via P2P/IPC
365
+ r7i6n3:610065:610128 [2] NCCL INFO Channel 02 : 14[88000] -> 13[1c000] via P2P/IPC
366
+ r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 0[1a000] [send] via NET/IB/1
367
+ r7i6n3:610063:610122 [0] NCCL INFO Connected all trees
368
+ r7i6n3:610063:610122 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
369
+ r7i6n3:610063:610122 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
370
+ r7i6n3:610066:610123 [3] NCCL INFO Connected all trees
371
+ r7i6n3:610066:610123 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
372
+ r7i6n3:610066:610123 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
373
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 2[88000] [send] via NET/IB/3
374
+ r7i6n3:610065:610128 [2] NCCL INFO Connected all trees
375
+ r7i6n3:610065:610128 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
376
+ r7i6n3:610065:610128 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
377
+ r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 8[1a000] -> 5[1c000] [send] via NET/IB/1
378
+ r7i6n3:610064:610133 [1] NCCL INFO Connected all trees
379
+ r7i6n3:610064:610133 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
380
+ r7i6n3:610064:610133 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
381
+ r7i6n3:610064:610133 [1] NCCL INFO comm 0x1471e8002fb0 rank 13 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE
382
+ r7i6n3:610063:610122 [0] NCCL INFO comm 0x148058002fb0 rank 12 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE
383
+ r7i6n3:610066:610123 [3] NCCL INFO comm 0x155220002fb0 rank 15 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE
384
+ r7i6n3:610065:610128 [2] NCCL INFO comm 0x1521c8002fb0 rank 14 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE
385
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] -> 7[8a000] [send] via NET/IB/3
386
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC
387
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[88000] via P2P/IPC
388
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 00 : 6[88000] -> 5[1c000] via P2P/IPC
389
+ r6i6n4:257717:257772 [3] NCCL INFO Channel 02 : 3[8a000] -> 2[88000] via P2P/IPC
390
+ r7i6n2:1370350:1370452 [3] NCCL INFO Channel 02 : 11[8a000] -> 10[88000] via P2P/IPC
391
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC
392
+ r6i6n4:257716:257777 [2] NCCL INFO Channel 02 : 2[88000] -> 1[1c000] via P2P/IPC
393
+ r6i6n5:378202:378256 [2] NCCL INFO Channel 02 : 6[88000] -> 5[1c000] via P2P/IPC
394
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 00 : 5[1c000] -> 4[1a000] via P2P/IPC
395
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 00 : 10[88000] -> 9[1c000] via P2P/IPC
396
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 01 : 5[1c000] -> 4[1a000] via P2P/IPC
397
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[88000] via P2P/IPC
398
+ r6i6n4:257717:257772 [3] NCCL INFO Connected all trees
399
+ r7i6n2:1370349:1370433 [2] NCCL INFO Channel 02 : 10[88000] -> 9[1c000] via P2P/IPC
400
+ r6i6n4:257717:257772 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
401
+ r6i6n4:257717:257772 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
402
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 4[1a000] via P2P/IPC
403
+ r6i6n5:378203:378255 [3] NCCL INFO Channel 02 : 7[8a000] -> 6[88000] via P2P/IPC
404
+ r6i6n5:378201:378257 [1] NCCL INFO Channel 03 : 5[1c000] -> 4[1a000] via P2P/IPC
405
+ r6i6n4:257714:257762 [0] NCCL INFO Connected all trees
406
+ r7i6n2:1370350:1370452 [3] NCCL INFO Connected all trees
407
+ r6i6n4:257714:257762 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
408
+ r6i6n4:257714:257762 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
409
+ r6i6n4:257716:257777 [2] NCCL INFO Connected all trees
410
+ r6i6n4:257716:257777 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
411
+ r6i6n4:257716:257777 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
412
+ r7i6n2:1370350:1370452 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
413
+ r7i6n2:1370350:1370452 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
414
+ r6i6n5:378203:378255 [3] NCCL INFO Connected all trees
415
+ r6i6n5:378203:378255 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
416
+ r6i6n5:378203:378255 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
417
+ r6i6n4:257715:257767 [1] NCCL INFO Connected all trees
418
+ r6i6n4:257715:257767 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
419
+ r6i6n4:257715:257767 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
420
+ r6i6n4:257714:257762 [0] NCCL INFO comm 0x145844002fb0 rank 0 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE
421
+ r6i6n4:257715:257767 [1] NCCL INFO comm 0x14c6f8002fb0 rank 1 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE
422
+ r6i6n4:257717:257772 [3] NCCL INFO comm 0x149830002fb0 rank 3 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE
423
+ r6i6n4:257716:257777 [2] NCCL INFO comm 0x151a88002fb0 rank 2 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE
424
+ r7i6n2:1370349:1370433 [2] NCCL INFO Connected all trees
425
+ r7i6n2:1370349:1370433 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
426
+ r7i6n2:1370349:1370433 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
427
+ r7i6n2:1370347:1370447 [0] NCCL INFO Connected all trees
428
+ r7i6n2:1370347:1370447 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
429
+ r7i6n2:1370347:1370447 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
430
+ r6i6n4:257714:257714 [0] NCCL INFO Launch mode Parallel
431
+ r6i6n5:378202:378256 [2] NCCL INFO Connected all trees
432
+ r6i6n5:378202:378256 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
433
+ r6i6n5:378202:378256 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
434
+ r7i6n2:1370348:1370434 [1] NCCL INFO Connected all trees
435
+ r7i6n2:1370348:1370434 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
436
+ r6i6n5:378200:378262 [0] NCCL INFO Connected all trees
437
+ r7i6n2:1370348:1370434 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
438
+ r6i6n5:378200:378262 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
439
+ r6i6n5:378200:378262 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
440
+ r7i6n2:1370347:1370447 [0] NCCL INFO comm 0x151020002fb0 rank 8 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE
441
+ r6i6n5:378201:378257 [1] NCCL INFO Connected all trees
442
+ r7i6n2:1370348:1370434 [1] NCCL INFO comm 0x14d418002fb0 rank 9 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE
443
+ r7i6n2:1370350:1370452 [3] NCCL INFO comm 0x154f28002fb0 rank 11 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE
444
+ r6i6n5:378201:378257 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512
445
+ r6i6n5:378201:378257 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer
446
+ r7i6n2:1370349:1370433 [2] NCCL INFO comm 0x154d48002fb0 rank 10 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE
447
+ r6i6n5:378200:378262 [0] NCCL INFO comm 0x153408002fb0 rank 4 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE
448
+ r6i6n5:378202:378256 [2] NCCL INFO comm 0x154188002fb0 rank 6 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE
449
+ r6i6n5:378201:378257 [1] NCCL INFO comm 0x14c900002fb0 rank 5 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE
450
+ r6i6n5:378203:378255 [3] NCCL INFO comm 0x14ef58002fb0 rank 7 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE
451
+ ignore me 6
452
+ 14:
453
+ duration: 1.1593 sec
454
+ algo throughput: 55204675592.2273 bps, 55.2047 Gbps
455
+ busbw: 51.7544 Gbps
456
+ ignore me 6
457
+ ignore me 6
458
+ 15:
459
+ duration: 1.2942 sec
460
+ algo throughput: 49451976290.1993 bps, 49.4520 Gbps
461
+ busbw: 46.3612 Gbps
462
+ 13:
463
+ duration: 1.1545 sec
464
+ algo throughput: 55435153048.8659 bps, 55.4352 Gbps
465
+ busbw: 51.9705 Gbps
466
+ ignore me 6
467
+ 12:
468
+ duration: 1.2946 sec
469
+ algo throughput: 49434318117.6515 bps, 49.4343 Gbps
470
+ busbw: 46.3447 Gbps
471
+ ignore me 6
472
+ ignore me 6
473
+ 9:
474
+ duration: 1.4402 sec
475
+ algo throughput: 44438492090.8732 bps, 44.4385 Gbps
476
+ busbw: 41.6611 Gbps
477
+ 10:
478
+ duration: 1.4738 sec
479
+ algo throughput: 43424520166.0441 bps, 43.4245 Gbps
480
+ ignore me 6
481
+ busbw: 40.7105 Gbps
482
+ ignore me 6
483
+ ignore me 6
484
+ 3:
485
+ duration: 1.7691 sec
486
+ algo throughput: 36177572497.7145 bps, 36.1776 Gbps
487
+ busbw: 33.9165 Gbps
488
+ 0:
489
+ 11:
490
+ duration: 1.0927 sec
491
+ duration: 1.8093 sec
492
+ algo throughput: 35371927695.6812 bps, 35.3719 Gbps
493
+ busbw: 33.1612 Gbps
494
+ algo throughput: 58569704243.7844 bps, 58.5697 Gbps
495
+ busbw: 54.9091 Gbps
496
+ ignore me 6
497
+ ignore me 6
498
+ 5:
499
+ duration: 2.0802 sec
500
+ algo throughput: 30765780785.6832 bps, 30.7658 Gbps
501
+ busbw: 28.8429 Gbps
502
+ ignore me 6
503
+ 6:
504
+ duration: 2.1418 sec
505
+ algo throughput: 29880845367.0138 bps, 29.8808 Gbps
506
+ busbw: 28.0133 Gbps
507
+ ignore me 6
508
+ 8:
509
+ duration: 1.2561 sec
510
+ algo throughput: 50951080615.8564 bps, 50.9511 Gbps
511
+ busbw: 47.7666 Gbps
512
+ 7:
513
+ duration: 1.8124 sec
514
+ algo throughput: 35312957596.3833 bps, 35.3130 Gbps
515
+ busbw: 33.1059 Gbps
516
+ ignore me 6
517
+ 4:
518
+ duration: 1.7526 sec
519
+ algo throughput: 36517122206.3803 bps, 36.5171 Gbps
520
+ busbw: 34.2348 Gbps
521
+ ignore me 6
522
+ 1:
523
+ duration: 1.8395 sec
524
+ algo throughput: 34792737240.4271 bps, 34.7927 Gbps
525
+ busbw: 32.6182 Gbps
526
+ ignore me 6
527
+ 2:
528
+ duration: 1.7637 sec
529
+ algo throughput: 36287170944.4988 bps, 36.2872 Gbps
530
+ busbw: 34.0192 Gbps
531
+ ignore me 109
532
+ 14:
533
+ duration: 0.7080 sec
534
+ algo throughput: 90399491760.9001 bps, 90.3995 Gbps
535
+ busbw: 84.7495 Gbps
536
+ ignore me 109
537
+ 15:
538
+ duration: 0.7080 sec
539
+ algo throughput: 90395163203.6951 bps, 90.3952 Gbps
540
+ busbw: 84.7455 Gbps
541
+ ignore me 109
542
+ 13:
543
+ duration: 0.7081 sec
544
+ algo throughput: 90382326783.5510 bps, 90.3823 Gbps
545
+ busbw: 84.7334 Gbps
546
+ ignore me 109
547
+ 12:
548
+ duration: 0.7080 sec
549
+ algo throughput: 90401745663.7657 bps, 90.4017 Gbps
550
+ busbw: 84.7516 Gbps
551
+ ignore me 109
552
+ 9:
553
+ duration: 0.7080 sec
554
+ algo throughput: 90395783074.5905 bps, 90.3958 Gbps
555
+ busbw: 84.7460 Gbps
556
+ ignore me 109
557
+ 10:
558
+ duration: 0.7082 sec
559
+ algo throughput: 90374224799.5715 bps, 90.3742 Gbps
560
+ busbw: 84.7258 Gbps
561
+ ignore me 109
562
+ 0:
563
+ duration: 0.7083 sec
564
+ algo throughput: 90354374863.7591 bps, 90.3544 Gbps
565
+ busbw: 84.7072 Gbps
566
+ ignore me 109
567
+ 11:
568
+ duration: 0.7084 sec
569
+ algo throughput: 90343336684.2220 bps, 90.3433 Gbps
570
+ busbw: 84.6969 Gbps
571
+ ignore me 109
572
+ 3:
573
+ duration: 0.7087 sec
574
+ algo throughput: 90311896434.2268 bps, 90.3119 Gbps
575
+ busbw: 84.6674 Gbps
576
+ ignore me 109
577
+ 8:
578
+ duration: 0.7085 sec
579
+ algo throughput: 90330088518.1323 bps, 90.3301 Gbps
580
+ busbw: 84.6845 Gbps
581
+ ignore me 109
582
+ ignore me 109
583
+ 2:
584
+ duration: 0.7085 sec
585
+ algo throughput: 90337030385.0629 bps, 90.3370 Gbps
586
+ busbw: 84.6910 Gbps
587
+ 5:
588
+ duration: 0.7088 sec
589
+ algo throughput: 90287308758.8899 bps, 90.2873 Gbps
590
+ busbw: 84.6444 Gbps
591
+ ignore me 109
592
+ ignore me 109
593
+ 1:
594
+ duration: 0.7089 sec
595
+ algo throughput: 90280901515.7927 bps, 90.2809 Gbps
596
+ busbw: 84.6383 Gbps
597
+ 6:
598
+ duration: 0.7090 sec
599
+ algo throughput: 90270047942.0345 bps, 90.2700 Gbps
600
+ busbw: 84.6282 Gbps
601
+ ignore me 109
602
+ 7:
603
+ duration: 0.7090 sec
604
+ algo throughput: 90272586091.4933 bps, 90.2726 Gbps
605
+ busbw: 84.6305 Gbps
606
+ ignore me 109
607
+ 4:
608
+ duration: 0.7085 sec
609
+ algo throughput: 90337161208.6908 bps, 90.3372 Gbps
610
+ busbw: 84.6911 Gbps
611
+ ignore me 1749
612
+ 14:
613
+ duration: 0.7107 sec
614
+ algo throughput: 90058256584.7650 bps, 90.0583 Gbps
615
+ busbw: 84.4296 Gbps
616
+ ignore me 1749
617
+ ignore me 1749
618
+ 15:
619
+ duration: 0.7107 sec
620
+ algo throughput: 90057464420.3045 bps, 90.0575 Gbps
621
+ busbw: 84.4289 Gbps
622
+ 13:
623
+ duration: 0.7106 sec
624
+ algo throughput: 90070702828.5613 bps, 90.0707 Gbps
625
+ busbw: 84.4413 Gbps
626
+ ignore me 1749
627
+ ignore me 1749
628
+ 12:
629
+ duration: 0.7106 sec
630
+ algo throughput: 90059933061.1509 bps, 90.0599 Gbps
631
+ busbw: 84.4312 Gbps
632
+ 9:
633
+ duration: 0.7105 sec
634
+ algo throughput: 90071340053.9053 bps, 90.0713 Gbps
635
+ busbw: 84.4419 Gbps
636
+ ignore me 1749
637
+ 10:
638
+ duration: 0.7106 sec
639
+ algo throughput: 90063253431.3530 bps, 90.0633 Gbps
640
+ busbw: 84.4343 Gbps
641
+ ignore me 1749
642
+ ignore me 1749
643
+ 11:
644
+ duration: 0.7106 sec
645
+ algo throughput: 90065670303.2662 bps, 90.0657 Gbps
646
+ busbw: 84.4366 Gbps
647
+ 0:
648
+ duration: 0.7107 sec
649
+ algo throughput: 90053334417.7426 bps, 90.0533 Gbps
650
+ busbw: 84.4250 Gbps
651
+ ignore me 1749
652
+ 3:
653
+ duration: 0.7106 sec
654
+ algo throughput: 90068692693.3661 bps, 90.0687 Gbps
655
+ busbw: 84.4394 Gbps
656
+ ignore me 1749
657
+ ignore me 1749
658
+ ignore me 1749
659
+ 8:
660
+ duration: 0.7105 sec
661
+ 2:
662
+ duration: 0.7104 sec
663
+ algo throughput: 90072894085.7098 bps, 90.0729 Gbps
664
+ busbw: 84.4433 Gbps
665
+ algo throughput: 90091360420.7079 bps, 90.0914 Gbps
666
+ busbw: 84.4607 Gbps
667
+ ignore me 1749
668
+ ignore me 1749
669
+ 5:
670
+ duration: 0.7104 sec
671
+ algo throughput: 90091316675.7603 bps, 90.0913 Gbps
672
+ busbw: 84.4606 Gbps
673
+ 1:
674
+ duration: 0.7103 sec
675
+ algo throughput: 90101456511.8536 bps, 90.1015 Gbps
676
+ busbw: 84.4701 Gbps
677
+ 6:
678
+ duration: 0.7103 sec
679
+ algo throughput: 90107024226.3038 bps, 90.1070 Gbps
680
+ busbw: 84.4753 Gbps
681
+ ignore me 1749
682
+ 7:
683
+ duration: 0.7103 sec
684
+ algo throughput: 90107799997.7677 bps, 90.1078 Gbps
685
+ busbw: 84.4761 Gbps
686
+ ignore me 1749
687
+ 4:
688
+ duration: 0.7103 sec
689
+ algo throughput: 90102477650.2766 bps, 90.1025 Gbps
690
+ busbw: 84.4711 Gbps
691
+ ignore me 27986
692
+ 14:
693
+ duration: 0.7092 sec
694
+ algo throughput: 90242129271.5844 bps, 90.2421 Gbps
695
+ busbw: 84.6020 Gbps
696
+ ignore me 27986
697
+ ignore me 27986
698
+ 15:
699
+ duration: 0.7093 sec
700
+ algo throughput: 90233065038.0259 bps, 90.2331 Gbps
701
+ busbw: 84.5935 Gbps
702
+ 13:
703
+ duration: 0.7093 sec
704
+ algo throughput: 90226024022.6829 bps, 90.2260 Gbps
705
+ busbw: 84.5869 Gbps
706
+ ignore me 27986
707
+ 12:
708
+ duration: 0.7092 sec
709
+ algo throughput: 90236901241.3211 bps, 90.2369 Gbps
710
+ busbw: 84.5971 Gbps
711
+ ignore me 27986
712
+ 9:
713
+ duration: 0.7093 sec
714
+ algo throughput: 90231794012.9985 bps, 90.2318 Gbps
715
+ busbw: 84.5923 Gbps
716
+ ignore me 27986
717
+ 10:
718
+ duration: 0.7093 sec
719
+ algo throughput: 90224093186.3902 bps, 90.2241 Gbps
720
+ busbw: 84.5851 Gbps
721
+ ignore me 27986
722
+ ignore me 27986
723
+ 0:
724
+ duration: 0.7092 sec
725
+ 11:
726
+ duration: 0.7092 sec
727
+ algo throughput: 90246123531.0302 bps, 90.2461 Gbps
728
+ busbw: 84.6057 Gbps
729
+ algo throughput: 90237670852.4900 bps, 90.2377 Gbps
730
+ busbw: 84.5978 Gbps
731
+ ignore me 27986
732
+ 3:
733
+ duration: 0.7093 sec
734
+ algo throughput: 90235789890.2677 bps, 90.2358 Gbps
735
+ busbw: 84.5961 Gbps
736
+ ignore me 27986
737
+ 8:
738
+ duration: 0.7092 sec
739
+ algo throughput: 90238335770.9699 bps, 90.2383 Gbps
740
+ busbw: 84.5984 Gbps
741
+ ignore me 27986
742
+ ignore me 27986
743
+ 2:
744
+ duration: 0.7093 sec
745
+ algo throughput: 90223737057.9605 bps, 90.2237 Gbps
746
+ busbw: 84.5848 Gbps
747
+ ignore me 27986
748
+ ignore me 27986
749
+ 5:
750
+ duration: 0.7093 sec
751
+ algo throughput: 90226816489.8323 bps, 90.2268 Gbps
752
+ busbw: 84.5876 Gbps
753
+ 6:
754
+ duration: 0.7093 sec
755
+ algo throughput: 90227312447.8407 bps, 90.2273 Gbps
756
+ busbw: 84.5881 Gbps
757
+ 1:
758
+ duration: 0.7094 sec
759
+ algo throughput: 90222924803.6610 bps, 90.2229 Gbps
760
+ busbw: 84.5840 Gbps
761
+ ignore me 27986
762
+ 7:
763
+ duration: 0.7093 sec
764
+ algo throughput: 90229254099.1920 bps, 90.2293 Gbps
765
+ busbw: 84.5899 Gbps
766
+ ignore me 27986
767
+ 4:
768
+ duration: 0.7094 sec
769
+ algo throughput: 90217548148.5392 bps, 90.2175 Gbps
770
+ busbw: 84.5790 Gbps
771
+ ignore me 447779
772
+ 14:
773
+ duration: 0.7079 sec
774
+ algo throughput: 90401898007.1683 bps, 90.4019 Gbps
775
+ busbw: 84.7518 Gbps
776
+ ignore me 447779
777
+ 13:
778
+ duration: 0.7078 sec
779
+ algo throughput: 90422510545.5320 bps, 90.4225 Gbps
780
+ busbw: 84.7711 Gbps
781
+ ignore me 447779
782
+ 15:
783
+ duration: 0.7080 sec
784
+ algo throughput: 90397684358.3370 bps, 90.3977 Gbps
785
+ busbw: 84.7478 Gbps
786
+ ignore me 447779
787
+ 12:
788
+ duration: 0.7080 sec
789
+ algo throughput: 90398934791.1951 bps, 90.3989 Gbps
790
+ busbw: 84.7490 Gbps
791
+ ignore me 447779
792
+ 10:
793
+ duration: 0.7079 sec
794
+ algo throughput: 90404439072.1211 bps, 90.4044 Gbps
795
+ busbw: 84.7542 Gbps
796
+ ignore me 447779
797
+ 11:
798
+ duration: 0.7078 sec
799
+ algo throughput: 90415260229.4886 bps, 90.4153 Gbps
800
+ busbw: 84.7643 Gbps
801
+ ignore me 447779
802
+ ignore me 447779
803
+ 9:
804
+ duration: 0.7086 sec
805
+ algo throughput: 90317814308.9687 bps, 90.3178 Gbps
806
+ busbw: 84.6730 Gbps
807
+ 0:
808
+ duration: 0.7081 sec
809
+ algo throughput: 90384670565.8098 bps, 90.3847 Gbps
810
+ busbw: 84.7356 Gbps
811
+ ignore me 447779
812
+ 8:
813
+ duration: 0.7080 sec
814
+ algo throughput: 90401729311.5575 bps, 90.4017 Gbps
815
+ busbw: 84.7516 Gbps
816
+ ignore me 447779
817
+ ignore me 447779
818
+ 2:
819
+ duration: 0.7081 sec
820
+ algo throughput: 90388659575.5084 bps, 90.3887 Gbps
821
+ busbw: 84.7394 Gbps
822
+ 1:
823
+ duration: 0.7080 sec
824
+ algo throughput: 90397847806.5952 bps, 90.3978 Gbps
825
+ busbw: 84.7480 Gbps
826
+ ignore me 447779
827
+ ignore me 447779
828
+ 3:
829
+ duration: 0.7086 sec
830
+ 6:
831
+ duration: 0.7083 sec
832
+ algo throughput: 90320233896.8830 bps, 90.3202 Gbps
833
+ busbw: 84.6752 Gbps
834
+ algo throughput: 90360979559.3395 bps, 90.3610 Gbps
835
+ busbw: 84.7134 Gbps
836
+ ignore me 447779
837
+ 7:
838
+ duration: 0.7083 sec
839
+ algo throughput: 90360919482.0588 bps, 90.3609 Gbps
840
+ busbw: 84.7134 Gbps
841
+ ignore me 447779
842
+ 5:
843
+ duration: 0.7087 sec
844
+ algo throughput: 90307672345.7347 bps, 90.3077 Gbps
845
+ busbw: 84.6634 Gbps
846
+ ignore me 447779
847
+ 4:
848
+ duration: 0.7085 sec
849
+ algo throughput: 90328680753.0585 bps, 90.3287 Gbps
850
+ busbw: 84.6831 Gbps
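
The log above is easier to read once the NCCL INFO topology lines are skipped: the first trial on the 16 V100 ranks lands between roughly 28 and 55 Gbps busbw, while the remaining warmed-up trials settle at about 84.4-84.8 Gbps busbw on every rank. A small helper along these lines (illustrative only, not part of this commit) condenses such a log; the file name comes from LOG_FILE in the slurm script above.

```python
# Illustrative helper, not part of this commit: condense the busbw numbers
# from a log produced by the launcher above.
import re
from statistics import mean

LOG = "all_reduce_bench-32gb-n4.txt"   # LOG_FILE from the slurm script above

with open(LOG) as f:
    busbw_gbps = [float(m.group(1))
                  for line in f
                  if (m := re.search(r"busbw:\s*([\d.]+)\s*Gbps", line))]

print(f"{len(busbw_gbps)} busbw samples: "
      f"min {min(busbw_gbps):.1f}, mean {mean(busbw_gbps):.1f}, max {max(busbw_gbps):.1f} Gbps")
```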
experiments/bandwidth/all_reduce_bench-a100-n4.slurm ADDED
@@ -0,0 +1,24 @@
+ #!/bin/bash
+ #SBATCH --job-name=all_reduce_bench-a100-n4
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:8 # number of gpus
+ #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
+
+ export NNODES=$SLURM_NNODES
+ export GPUS_PER_NODE=8
+ export NCCL_DEBUG=info
+
+ export LOG_FILE=all_reduce_bench-a100-$NNODES.txt
+
+ export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+
+ srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.launch --nnodes $NNODES --nproc_per_node $GPUS_PER_NODE --node_rank $SLURM_PROCID --master_addr $MASTER_ADDR --master_port 12345 all_reduce_bench.py' 2>&1 | tee $LOG_FILE
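
Both launchers are submitted in the usual way, e.g. `sbatch experiments/bandwidth/all_reduce_bench-a100-n4.slurm` (access to the six@gpu / six@a100 accounts is assumed); each run tees its console output into the LOG_FILE named in the script, producing the .txt logs included in this commit.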
experiments/bandwidth/all_reduce_bench-a100-n4.txt ADDED
@@ -0,0 +1,1424 @@
1
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
2
+ and will be removed in future. Use torchrun.
3
+ Note that --use_env is set by default in torchrun.
4
+ If your script expects `--local_rank` argument to be set, please
5
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
6
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
7
+ further instructions
8
+
9
+ warnings.warn(
10
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
11
+ and will be removed in future. Use torchrun.
12
+ Note that --use_env is set by default in torchrun.
13
+ If your script expects `--local_rank` argument to be set, please
14
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
15
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
16
+ further instructions
17
+
18
+ warnings.warn(
19
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
20
+ and will be removed in future. Use torchrun.
21
+ Note that --use_env is set by default in torchrun.
22
+ If your script expects `--local_rank` argument to be set, please
23
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
24
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
25
+ further instructions
26
+
27
+ warnings.warn(
28
+ /gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated
29
+ and will be removed in future. Use torchrun.
30
+ Note that --use_env is set by default in torchrun.
31
+ If your script expects `--local_rank` argument to be set, please
32
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
33
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
34
+ further instructions
35
+
36
+ warnings.warn(
37
+ WARNING:torch.distributed.run:
38
+ *****************************************
39
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
40
+ *****************************************
41
+ WARNING:torch.distributed.run:
42
+ *****************************************
43
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
44
+ *****************************************
45
+ WARNING:torch.distributed.run:
46
+ *****************************************
47
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
48
+ *****************************************
49
+ WARNING:torch.distributed.run:
50
+ *****************************************
51
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
52
+ *****************************************
53
+ local_rank: 1
54
+ local_rank: 3
55
+ local_rank: 5
56
+ local_rank: 6
57
+ local_rank: 7
58
+ local_rank: 2
59
+ local_rank: 2
60
+ local_rank: 3
61
+ local_rank: 2
62
+ local_rank: 1
63
+ local_rank: 0
64
+ local_rank: 2
65
+ local_rank: 0
66
+ local_rank: 0
67
+ local_rank: 5
68
+ local_rank: 1
69
+ local_rank: 4
70
+ local_rank: 3
71
+ local_rank: 7
72
+ local_rank: 7
73
+ local_rank: 6
74
+ local_rank: 6
75
+ local_rank: 4
76
+ local_rank: 5
77
+ local_rank: 5
78
+ local_rank: 1
79
+ local_rank: 3
80
+ local_rank: 4
81
+ local_rank: 6
82
+ local_rank: 7
83
+ local_rank: 0
84
+ local_rank: 4
85
+ 0 data size: 4.0 GB
86
+ 1 data size: 4.0 GB
87
+ 5 data size: 4.0 GB
88
+ 20 data size: 4.0 GB
89
+ 30 data size: 4.0 GB
90
+ 3 data size: 4.0 GB
91
+ 12 data size: 4.0 GB
92
+ 21 data size: 4.0 GB
93
+ 28 data size: 4.0 GB
94
+ 17 data size: 4.0 GB
95
+ 2 data size: 4.0 GB
96
+ 25 data size: 4.0 GB
97
+ 19 data size: 4.0 GB
98
+ 22 data size: 4.0 GB
99
+ 16 data size: 4.0 GB
100
+ 15 data size: 4.0 GB
101
+ 26 data size: 4.0 GB
102
+ 27 data size: 4.0 GB
103
+ 6 data size: 4.0 GB
104
+ 24 data size: 4.0 GB
105
+ 9 data size: 4.0 GB
106
+ 29 data size: 4.0 GB
107
+ 23 data size: 4.0 GB
108
+ 31 data size: 4.0 GB
109
+ 14 data size: 4.0 GB
110
+ 7 data size: 4.0 GB
111
+ 18 data size: 4.0 GB
112
+ 8 data size: 4.0 GB
113
+ 11 data size: 4.0 GB
114
+ 10 data size: 4.0 GB
115
+ 13 data size: 4.0 GB
116
+ 4 data size: 4.0 GB
117
+ jean-zay-iam37:261379:261379 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
118
+ jean-zay-iam37:261379:261379 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
119
+ jean-zay-iam37:261379:261379 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
120
+ jean-zay-iam37:261379:261379 [0] NCCL INFO Using network IB
121
+ NCCL version 2.10.3+cuda11.3
122
+ jean-zay-iam37:261380:261380 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
123
+ jean-zay-iam37:261383:261383 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
124
+ jean-zay-iam37:261384:261384 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
125
+ jean-zay-iam37:261386:261386 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
126
+ jean-zay-iam37:261381:261381 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
127
+ jean-zay-iam37:261382:261382 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
128
+ jean-zay-iam41:276753:276753 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
129
+ jean-zay-iam41:276748:276748 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
130
+ jean-zay-iam41:276747:276747 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
131
+ jean-zay-iam41:276752:276752 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
132
+ jean-zay-iam41:276750:276750 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
133
+ jean-zay-iam41:276751:276751 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
134
+ jean-zay-iam41:276746:276746 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
135
+ jean-zay-iam41:276749:276749 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0>
136
+ jean-zay-iam37:261384:261384 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
137
+ jean-zay-iam52:263016:263016 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
138
+ jean-zay-iam52:263017:263017 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
139
+ jean-zay-iam52:263018:263018 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
140
+ jean-zay-iam52:263021:263021 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
141
+ jean-zay-iam52:263019:263019 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
142
+ jean-zay-iam52:263015:263015 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
143
+ jean-zay-iam52:263020:263020 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
144
+ jean-zay-iam52:263022:263022 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0>
145
+ jean-zay-iam37:261386:261386 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
146
+ jean-zay-iam37:261381:261381 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
147
+ jean-zay-iam37:261380:261380 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
148
+ jean-zay-iam37:261383:261383 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
149
+ jean-zay-iam40:289973:289973 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
150
+ jean-zay-iam40:289967:289967 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
151
+ jean-zay-iam40:289969:289969 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
152
+ jean-zay-iam40:289971:289971 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
153
+ jean-zay-iam40:289970:289970 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
154
+ jean-zay-iam40:289972:289972 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
155
+ jean-zay-iam40:289968:289968 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
156
+ jean-zay-iam40:289974:289974 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0>
157
+ jean-zay-iam37:261382:261382 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
158
+ jean-zay-iam41:276751:276751 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
159
+ jean-zay-iam41:276749:276749 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
160
+ jean-zay-iam41:276746:276746 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
161
+ jean-zay-iam41:276753:276753 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
162
+ jean-zay-iam41:276748:276748 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
163
+ jean-zay-iam41:276752:276752 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
164
+ jean-zay-iam41:276747:276747 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
165
+ jean-zay-iam41:276750:276750 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
166
+ jean-zay-iam52:263017:263017 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
167
+ jean-zay-iam52:263016:263016 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
168
+ jean-zay-iam40:289968:289968 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
169
+ jean-zay-iam40:289974:289974 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
170
+ jean-zay-iam52:263021:263021 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
171
+ jean-zay-iam52:263015:263015 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
172
+ jean-zay-iam52:263019:263019 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
173
+ jean-zay-iam52:263020:263020 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
174
+ jean-zay-iam40:289973:289973 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
175
+ jean-zay-iam52:263018:263018 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
176
+ jean-zay-iam52:263022:263022 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
177
+ jean-zay-iam40:289972:289972 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
178
+ jean-zay-iam40:289969:289969 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
179
+ jean-zay-iam40:289971:289971 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
180
+ jean-zay-iam40:289967:289967 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
181
+ jean-zay-iam40:289970:289970 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
182
+ jean-zay-iam37:261384:261384 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
183
+ jean-zay-iam37:261384:261384 [5] NCCL INFO Using network IB
184
+ jean-zay-iam37:261380:261380 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
185
+ jean-zay-iam37:261380:261380 [1] NCCL INFO Using network IB
186
+ jean-zay-iam37:261383:261383 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
187
+ jean-zay-iam37:261381:261381 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
188
+ jean-zay-iam37:261386:261386 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
189
+ jean-zay-iam37:261381:261381 [2] NCCL INFO Using network IB
190
+ jean-zay-iam37:261383:261383 [4] NCCL INFO Using network IB
191
+ jean-zay-iam37:261386:261386 [7] NCCL INFO Using network IB
192
+ jean-zay-iam37:261382:261382 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
193
+ jean-zay-iam37:261382:261382 [3] NCCL INFO Using network IB
194
+ jean-zay-iam41:276751:276751 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
195
+ jean-zay-iam41:276751:276751 [5] NCCL INFO Using network IB
196
+ jean-zay-iam41:276748:276748 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
197
+ jean-zay-iam41:276748:276748 [2] NCCL INFO Using network IB
198
+ jean-zay-iam41:276747:276747 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
199
+ jean-zay-iam41:276747:276747 [1] NCCL INFO Using network IB
200
+ jean-zay-iam41:276752:276752 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
201
+ jean-zay-iam41:276746:276746 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
202
+ jean-zay-iam41:276749:276749 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
203
+ jean-zay-iam41:276753:276753 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
204
+ jean-zay-iam41:276749:276749 [3] NCCL INFO Using network IB
205
+ jean-zay-iam41:276750:276750 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0>
206
+ jean-zay-iam41:276746:276746 [0] NCCL INFO Using network IB
207
+ jean-zay-iam41:276753:276753 [7] NCCL INFO Using network IB
208
+ jean-zay-iam41:276752:276752 [6] NCCL INFO Using network IB
209
+ jean-zay-iam41:276750:276750 [4] NCCL INFO Using network IB
210
+ jean-zay-iam52:263022:263022 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
211
+ jean-zay-iam52:263022:263022 [7] NCCL INFO Using network IB
212
+ jean-zay-iam52:263019:263019 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
213
+ jean-zay-iam52:263019:263019 [4] NCCL INFO Using network IB
214
+ jean-zay-iam52:263021:263021 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
215
+ jean-zay-iam52:263021:263021 [6] NCCL INFO Using network IB
216
+ jean-zay-iam52:263017:263017 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
217
+ jean-zay-iam52:263017:263017 [2] NCCL INFO Using network IB
218
+ jean-zay-iam52:263020:263020 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
219
+ jean-zay-iam52:263020:263020 [5] NCCL INFO Using network IB
220
+ jean-zay-iam52:263015:263015 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
221
+ jean-zay-iam52:263018:263018 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
222
+ jean-zay-iam52:263015:263015 [0] NCCL INFO Using network IB
223
+ jean-zay-iam52:263018:263018 [3] NCCL INFO Using network IB
224
+ jean-zay-iam52:263016:263016 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0>
225
+ jean-zay-iam52:263016:263016 [1] NCCL INFO Using network IB
226
+ jean-zay-iam40:289968:289968 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
227
+ jean-zay-iam40:289968:289968 [1] NCCL INFO Using network IB
228
+ jean-zay-iam40:289972:289972 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
229
+ jean-zay-iam40:289972:289972 [5] NCCL INFO Using network IB
230
+ jean-zay-iam40:289969:289969 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
231
+ jean-zay-iam40:289969:289969 [2] NCCL INFO Using network IB
232
+ jean-zay-iam40:289973:289973 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
233
+ jean-zay-iam40:289973:289973 [6] NCCL INFO Using network IB
234
+ jean-zay-iam40:289970:289970 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
235
+ jean-zay-iam40:289970:289970 [3] NCCL INFO Using network IB
236
+ jean-zay-iam40:289967:289967 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
237
+ jean-zay-iam40:289967:289967 [0] NCCL INFO Using network IB
238
+ jean-zay-iam40:289974:289974 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
239
+ jean-zay-iam40:289971:289971 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0>
240
+ jean-zay-iam40:289974:289974 [7] NCCL INFO Using network IB
241
+ jean-zay-iam40:289971:289971 [4] NCCL INFO Using network IB
242
+ jean-zay-iam37:261385:261385 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0>
243
+ jean-zay-iam37:261385:261385 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
244
+ jean-zay-iam37:261385:261385 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0>
245
+ jean-zay-iam37:261385:261385 [6] NCCL INFO Using network IB
246
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Trees [0] 26/-1/-1->25->24 [1] 26/-1/-1->25->24
247
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff
248
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Trees [0] 27/-1/-1->26->25 [1] 27/-1/-1->26->25
249
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff
250
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Trees [0] 28/-1/-1->27->26 [1] 28/-1/-1->27->26
251
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff
252
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Trees [0] 31/-1/-1->30->29 [1] 31/-1/-1->30->29
253
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Trees [0] 30/-1/-1->29->28 [1] 30/-1/-1->29->28
254
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000
255
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000
256
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Trees [0] 29/-1/-1->28->27 [1] 29/-1/-1->28->27
257
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Trees [0] -1/-1/-1->31->30 [1] -1/-1/-1->31->30
258
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000
259
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000
260
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
261
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1
262
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
263
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Trees [0] 1/16/-1->0->-1 [1] 1/-1/-1->0->8
264
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0
265
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff
266
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff
267
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff
268
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Trees [0] 20/-1/-1->19->18 [1] 20/-1/-1->19->18
269
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Trees [0] 22/-1/-1->21->20 [1] 22/-1/-1->21->20
270
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff
271
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Trees [0] 21/-1/-1->20->19 [1] 21/-1/-1->20->19
272
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Trees [0] 23/-1/-1->22->21 [1] 23/-1/-1->22->21
273
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Trees [0] 25/-1/-1->24->16 [1] 25/8/-1->24->-1
274
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff
275
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] -1/-1/-1->15->14
276
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000
277
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000
278
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000
279
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Trees [0] -1/-1/-1->23->22 [1] -1/-1/-1->23->22
280
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000
281
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000
282
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Trees [0] 18/8/-1->17->16 [1] 18/-1/-1->17->16
283
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Trees [0] 19/-1/-1->18->17 [1] 19/-1/-1->18->17
284
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Trees [0] 17/24/-1->16->0 [1] 17/-1/-1->16->9
285
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff
286
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff
287
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff
288
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2
289
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff
290
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3
291
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000
292
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5
293
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000
294
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4
295
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000
296
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6
297
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000
298
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Trees [0] 9/-1/-1->8->17 [1] 9/0/-1->8->24
299
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff
300
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/-1/-1->10->9
301
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Trees [0] 12/-1/-1->11->10 [1] 12/-1/-1->11->10
302
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Trees [0] 10/-1/-1->9->8 [1] 10/16/-1->9->8
303
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->13
304
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff
305
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff
306
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000
307
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff
308
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] 14/-1/-1->13->12
309
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Trees [0] 13/-1/-1->12->11 [1] 13/-1/-1->12->11
310
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000
311
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000
312
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 31[cb000] -> 0[7000] [receive] via NET/IB/1
313
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 7[cb000] -> 8[7000] [receive] via NET/IB/1
314
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 23[cb000] -> 24[7000] [receive] via NET/IB/1
315
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 00 : 25[b000] -> 26[48000] via P2P/IPC/read
316
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 00 : 26[48000] -> 27[4c000] via P2P/IPC/read
317
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 00 : 27[4c000] -> 28[88000] via P2P/IPC/read
318
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 15[cb000] -> 16[7000] [receive] via NET/IB/1
319
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 00 : 29[8b000] -> 30[c8000] via P2P/IPC/read
320
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 00 : 15[cb000] -> 16[7000] [send] via NET/IB/3
321
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 00 : 22[c8000] -> 23[cb000] via P2P/IPC/read
322
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 00 : 28[88000] -> 29[8b000] via P2P/IPC/read
323
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 00 : 30[c8000] -> 31[cb000] via P2P/IPC/read
324
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 01 : 25[b000] -> 26[48000] via P2P/IPC/read
325
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 00 : 20[88000] -> 21[8b000] via P2P/IPC/read
326
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 23[cb000] -> 24[7000] [receive] via NET/IB/1
327
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 01 : 26[48000] -> 27[4c000] via P2P/IPC/read
328
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 00 : 21[8b000] -> 22[c8000] via P2P/IPC/read
329
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 00 : 31[cb000] -> 0[7000] [send] via NET/IB/3
330
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 00 : 19[4c000] -> 20[88000] via P2P/IPC/read
331
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 31[cb000] -> 0[7000] [receive] via NET/IB/1
332
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 00 : 2[48000] -> 3[4c000] via P2P/IPC/read
333
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 01 : 27[4c000] -> 28[88000] via P2P/IPC/read
334
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 18[48000] via P2P/IPC/read
335
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 00 : 18[48000] -> 19[4c000] via P2P/IPC/read
336
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 01 : 29[8b000] -> 30[c8000] via P2P/IPC/read
337
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 00 : 1[b000] -> 2[48000] via P2P/IPC/read
338
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 15[cb000] -> 16[7000] [receive] via NET/IB/1
339
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 00 : 3[4c000] -> 4[88000] via P2P/IPC/read
340
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 00 : 6[c8000] -> 7[cb000] via P2P/IPC/read
341
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 01 : 22[c8000] -> 23[cb000] via P2P/IPC/read
342
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 01 : 28[88000] -> 29[8b000] via P2P/IPC/read
343
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 01 : 30[c8000] -> 31[cb000] via P2P/IPC/read
344
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 7[cb000] -> 8[7000] [receive] via NET/IB/1
345
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 00 : 9[b000] -> 10[48000] via P2P/IPC/read
346
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 24[7000] -> 25[b000] via P2P/IPC/read
347
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 00 : 23[cb000] -> 24[7000] [send] via NET/IB/3
348
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 01 : 20[88000] -> 21[8b000] via P2P/IPC/read
349
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 00 : 11[4c000] -> 12[88000] via P2P/IPC/read
350
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 00 : 10[48000] -> 11[4c000] via P2P/IPC/read
351
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 00 : 14[c8000] -> 15[cb000] via P2P/IPC/read
352
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 00 : 13[8b000] -> 14[c8000] via P2P/IPC/read
353
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 00 : 7[cb000] -> 8[7000] [send] via NET/IB/3
354
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 01 : 21[8b000] -> 22[c8000] via P2P/IPC/read
355
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 01 : 19[4c000] -> 20[88000] via P2P/IPC/read
356
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 01 : 15[cb000] -> 16[7000] [send] via NET/IB/3
357
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 00 : 12[88000] -> 13[8b000] via P2P/IPC/read
358
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 01 : 17[b000] -> 18[48000] via P2P/IPC/read
359
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 01 : 18[48000] -> 19[4c000] via P2P/IPC/read
360
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 17[b000] via P2P/IPC/read
361
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 00 : 4[88000] -> 5[8b000] via P2P/IPC/read
362
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 01 : 31[cb000] -> 0[7000] [send] via NET/IB/3
363
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 00 : 5[8b000] -> 6[c8000] via P2P/IPC/read
364
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 9[b000] -> 10[48000] via P2P/IPC/read
365
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 8[7000] -> 9[b000] via P2P/IPC/read
366
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 01 : 11[4c000] -> 12[88000] via P2P/IPC/read
367
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 01 : 10[48000] -> 11[4c000] via P2P/IPC/read
368
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 01 : 13[8b000] -> 14[c8000] via P2P/IPC/read
369
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 01 : 14[c8000] -> 15[cb000] via P2P/IPC/read
370
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 0[7000] -> 1[b000] via P2P/IPC/read
371
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 01 : 2[48000] -> 3[4c000] via P2P/IPC/read
372
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 01 : 12[88000] -> 13[8b000] via P2P/IPC/read
373
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 01 : 1[b000] -> 2[48000] via P2P/IPC/read
374
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 01 : 3[4c000] -> 4[88000] via P2P/IPC/read
375
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 01 : 6[c8000] -> 7[cb000] via P2P/IPC/read
376
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 01 : 7[cb000] -> 8[7000] [send] via NET/IB/3
377
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 01 : 23[cb000] -> 24[7000] [send] via NET/IB/3
378
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 24[7000] -> 25[b000] via P2P/IPC/read
379
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 01 : 4[88000] -> 5[8b000] via P2P/IPC/read
380
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 01 : 5[8b000] -> 6[c8000] via P2P/IPC/read
381
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 9[b000] via P2P/IPC/read
382
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 16[7000] -> 17[b000] via P2P/IPC/read
383
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 0[7000] -> 1[b000] via P2P/IPC/read
384
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Connected all rings
385
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Connected all rings
386
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Connected all rings
387
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Connected all rings
388
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Connected all rings
389
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Connected all rings
390
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Connected all rings
391
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Connected all rings
392
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Connected all rings
393
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Connected all rings
394
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Connected all rings
395
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Connected all rings
396
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Connected all rings
397
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Connected all rings
398
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Connected all rings
399
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Connected all rings
400
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Connected all rings
401
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Connected all rings
402
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Connected all rings
403
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 16[7000] -> 24[7000] [receive] via NET/IB/1
404
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 00 : 23[cb000] -> 22[c8000] via P2P/IPC/read
405
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Connected all rings
406
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Connected all rings
407
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Connected all rings
408
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 00 : 31[cb000] -> 30[c8000] via P2P/IPC/read
409
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Connected all rings
410
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Connected all rings
411
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Connected all rings
412
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 00 : 7[cb000] -> 6[c8000] via P2P/IPC/read
413
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Connected all rings
414
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 00 : 15[cb000] -> 14[c8000] via P2P/IPC/read
415
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Connected all rings
416
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 0[7000] -> 8[7000] [receive] via NET/IB/1
417
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 9[b000] -> 16[7000] [receive] via NET/IB/1
418
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Connected all rings
419
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Connected all rings
420
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Connected all rings
421
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Connected all rings
422
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 01 : 31[cb000] -> 30[c8000] via P2P/IPC/read
423
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 01 : 23[cb000] -> 22[c8000] via P2P/IPC/read
424
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 0[7000] -> 8[7000] [send] via NET/IB/1
425
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 00 : 26[48000] -> 25[b000] via P2P/IPC/read
426
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Connected all rings
427
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 01 : 15[cb000] -> 14[c8000] via P2P/IPC/read
428
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 00 : 27[4c000] -> 26[48000] via P2P/IPC/read
429
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 00 : 28[88000] -> 27[4c000] via P2P/IPC/read
430
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 00 : 29[8b000] -> 28[88000] via P2P/IPC/read
431
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 00 : 21[8b000] -> 20[88000] via P2P/IPC/read
432
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 8[7000] -> 17[b000] [receive] via NET/IB/1
433
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 01 : 7[cb000] -> 6[c8000] via P2P/IPC/read
434
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 00 : 20[88000] -> 19[4c000] via P2P/IPC/read
435
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 01 : 26[48000] -> 25[b000] via P2P/IPC/read
436
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 00 : 30[c8000] -> 29[8b000] via P2P/IPC/read
437
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 00 : 18[48000] -> 17[b000] via P2P/IPC/read
438
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 00 : 19[4c000] -> 18[48000] via P2P/IPC/read
439
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 9[b000] -> 16[7000] [send] via NET/IB/1
440
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 01 : 27[4c000] -> 26[48000] via P2P/IPC/read
441
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 01 : 28[88000] -> 27[4c000] via P2P/IPC/read
442
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 01 : 29[8b000] -> 28[88000] via P2P/IPC/read
443
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 00 : 25[b000] -> 24[7000] via P2P/IPC/read
444
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 01 : 21[8b000] -> 20[88000] via P2P/IPC/read
445
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 01 : 20[88000] -> 19[4c000] via P2P/IPC/read
446
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 00 : 22[c8000] -> 21[8b000] via P2P/IPC/read
447
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 16[7000] -> 0[7000] [receive] via NET/IB/1
448
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 01 : 18[48000] -> 17[b000] via P2P/IPC/read
449
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 01 : 19[4c000] -> 18[48000] via P2P/IPC/read
450
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 01 : 30[c8000] -> 29[8b000] via P2P/IPC/read
451
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 00 : 14[c8000] -> 13[8b000] via P2P/IPC/read
452
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 01 : 22[c8000] -> 21[8b000] via P2P/IPC/read
453
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 00 : 10[48000] -> 9[b000] via P2P/IPC/read
454
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 8[7000] -> 17[b000] [send] via NET/IB/1
455
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 00 : 11[4c000] -> 10[48000] via P2P/IPC/read
456
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 00 : 13[8b000] -> 12[88000] via P2P/IPC/read
457
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 00 : 12[88000] -> 11[4c000] via P2P/IPC/read
458
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 00 : 2[48000] -> 1[b000] via P2P/IPC/read
459
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 01 : 25[b000] -> 24[7000] via P2P/IPC/read
460
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 00 : 3[4c000] -> 2[48000] via P2P/IPC/read
461
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 00 : 6[c8000] -> 5[8b000] via P2P/IPC/read
462
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 00 : 5[8b000] -> 4[88000] via P2P/IPC/read
463
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 01 : 14[c8000] -> 13[8b000] via P2P/IPC/read
464
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 01 : 10[48000] -> 9[b000] via P2P/IPC/read
465
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 16[7000] -> 9[b000] [receive] via NET/IB/1
466
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 00 : 4[88000] -> 3[4c000] via P2P/IPC/read
467
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 0[7000] -> 16[7000] [send] via NET/IB/1
468
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 01 : 11[4c000] -> 10[48000] via P2P/IPC/read
469
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 01 : 13[8b000] -> 12[88000] via P2P/IPC/read
470
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 01 : 12[88000] -> 11[4c000] via P2P/IPC/read
471
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 00 : 1[b000] -> 0[7000] via P2P/IPC/read
472
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 24[7000] -> 8[7000] [receive] via NET/IB/1
473
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 01 : 2[48000] -> 1[b000] via P2P/IPC/read
474
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 01 : 3[4c000] -> 2[48000] via P2P/IPC/read
475
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 01 : 6[c8000] -> 5[8b000] via P2P/IPC/read
476
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 01 : 5[8b000] -> 4[88000] via P2P/IPC/read
477
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 01 : 4[88000] -> 3[4c000] via P2P/IPC/read
478
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 24[7000] [send] via NET/IB/1
479
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 01 : 1[b000] -> 0[7000] via P2P/IPC/read
480
+ jean-zay-iam41:276753:276870 [7] NCCL INFO Connected all trees
481
+ jean-zay-iam41:276753:276870 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
482
+ jean-zay-iam41:276753:276870 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
483
+ jean-zay-iam52:263022:263135 [7] NCCL INFO Connected all trees
484
+ jean-zay-iam52:263022:263135 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
485
+ jean-zay-iam52:263022:263135 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
486
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 8[7000] [send] via NET/IB/1
487
+ jean-zay-iam40:289974:290092 [7] NCCL INFO Connected all trees
488
+ jean-zay-iam40:289974:290092 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
489
+ jean-zay-iam40:289974:290092 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
490
+ jean-zay-iam52:263018:263141 [3] NCCL INFO Connected all trees
491
+ jean-zay-iam52:263018:263141 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
492
+ jean-zay-iam52:263018:263141 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
493
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 24[7000] [send] via NET/IB/1
494
+ jean-zay-iam52:263019:263136 [4] NCCL INFO Connected all trees
495
+ jean-zay-iam52:263019:263136 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
496
+ jean-zay-iam52:263019:263136 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
497
+ jean-zay-iam37:261386:261498 [7] NCCL INFO Connected all trees
498
+ jean-zay-iam37:261386:261498 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
499
+ jean-zay-iam37:261386:261498 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
500
+ jean-zay-iam41:276750:276871 [4] NCCL INFO Connected all trees
501
+ jean-zay-iam41:276750:276871 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
502
+ jean-zay-iam41:276750:276871 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
503
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 0[7000] -> 16[7000] [receive] via NET/IB/1
504
+ jean-zay-iam41:276749:276869 [3] NCCL INFO Connected all trees
505
+ jean-zay-iam41:276749:276869 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
506
+ jean-zay-iam41:276749:276869 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
507
+ jean-zay-iam41:276752:276872 [6] NCCL INFO Connected all trees
508
+ jean-zay-iam41:276752:276872 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
509
+ jean-zay-iam52:263021:263137 [6] NCCL INFO Connected all trees
510
+ jean-zay-iam41:276752:276872 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
511
+ jean-zay-iam52:263021:263137 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
512
+ jean-zay-iam52:263021:263137 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
513
+ jean-zay-iam41:276751:276865 [5] NCCL INFO Connected all trees
514
+ jean-zay-iam41:276751:276865 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
515
+ jean-zay-iam41:276751:276865 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
516
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 8[7000] -> 24[7000] [receive] via NET/IB/1
517
+ jean-zay-iam52:263020:263139 [5] NCCL INFO Connected all trees
518
+ jean-zay-iam52:263020:263139 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
519
+ jean-zay-iam52:263020:263139 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
520
+ jean-zay-iam52:263017:263138 [2] NCCL INFO Connected all trees
521
+ jean-zay-iam52:263017:263138 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
522
+ jean-zay-iam52:263017:263138 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
523
+ jean-zay-iam40:289973:290089 [6] NCCL INFO Connected all trees
524
+ jean-zay-iam40:289973:290089 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
525
+ jean-zay-iam40:289973:290089 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
526
+ jean-zay-iam40:289970:290090 [3] NCCL INFO Connected all trees
527
+ jean-zay-iam40:289970:290090 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
528
+ jean-zay-iam40:289970:290090 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
529
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 0[7000] [send] via NET/IB/1
530
+ jean-zay-iam40:289971:290093 [4] NCCL INFO Connected all trees
531
+ jean-zay-iam40:289971:290093 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
532
+ jean-zay-iam40:289971:290093 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
533
+ jean-zay-iam40:289972:290088 [5] NCCL INFO Connected all trees
534
+ jean-zay-iam40:289972:290088 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
535
+ jean-zay-iam40:289972:290088 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
536
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 24[7000] -> 8[7000] [send] via NET/IB/1
537
+ jean-zay-iam37:261385:261506 [6] NCCL INFO Connected all trees
538
+ jean-zay-iam37:261385:261506 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
539
+ jean-zay-iam37:261385:261506 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
540
+ jean-zay-iam37:261382:261501 [3] NCCL INFO Connected all trees
541
+ jean-zay-iam37:261382:261501 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
542
+ jean-zay-iam37:261382:261501 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
543
+ jean-zay-iam37:261383:261499 [4] NCCL INFO Connected all trees
544
+ jean-zay-iam37:261383:261499 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
545
+ jean-zay-iam37:261383:261499 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
546
+ jean-zay-iam37:261384:261496 [5] NCCL INFO Connected all trees
547
+ jean-zay-iam37:261384:261496 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
548
+ jean-zay-iam37:261384:261496 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
549
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 24[7000] -> 16[7000] [receive] via NET/IB/1
550
+ jean-zay-iam37:261381:261500 [2] NCCL INFO Connected all trees
551
+ jean-zay-iam37:261381:261500 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
552
+ jean-zay-iam37:261381:261500 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
553
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 8[7000] -> 0[7000] [receive] via NET/IB/1
554
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 24[7000] -> 16[7000] [send] via NET/IB/1
555
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 17[b000] -> 8[7000] [receive] via NET/IB/1
556
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 16[7000] via P2P/IPC/read
557
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 01 : 17[b000] -> 16[7000] via P2P/IPC/read
558
+ jean-zay-iam52:263015:263140 [0] NCCL INFO Connected all trees
559
+ jean-zay-iam52:263015:263140 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
560
+ jean-zay-iam52:263015:263140 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
561
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 0[7000] [send] via NET/IB/1
562
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 16[7000] -> 9[b000] [send] via NET/IB/1
563
+ jean-zay-iam52:263016:263142 [1] NCCL INFO Connected all trees
564
+ jean-zay-iam52:263016:263142 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
565
+ jean-zay-iam52:263016:263142 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
566
+ jean-zay-iam52:263019:263136 [4] NCCL INFO comm 0x14b1a8002fb0 rank 28 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE
567
+ jean-zay-iam52:263020:263139 [5] NCCL INFO comm 0x151418002fb0 rank 29 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE
568
+ jean-zay-iam52:263016:263142 [1] NCCL INFO comm 0x145588002fb0 rank 25 nranks 32 cudaDev 1 busId b000 - Init COMPLETE
569
+ jean-zay-iam52:263015:263140 [0] NCCL INFO comm 0x14c858002fb0 rank 24 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE
570
+ jean-zay-iam52:263017:263138 [2] NCCL INFO comm 0x14e858002fb0 rank 26 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE
571
+ jean-zay-iam52:263018:263141 [3] NCCL INFO comm 0x150208002fb0 rank 27 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE
572
+ jean-zay-iam52:263021:263137 [6] NCCL INFO comm 0x151df8002fb0 rank 30 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE
573
+ jean-zay-iam52:263022:263135 [7] NCCL INFO comm 0x152728002fb0 rank 31 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE
574
+ jean-zay-iam41:276748:276866 [2] NCCL INFO Connected all trees
575
+ jean-zay-iam41:276748:276866 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
576
+ jean-zay-iam41:276748:276866 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
577
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 00 : 9[b000] -> 8[7000] via P2P/IPC/read
578
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 9[b000] -> 8[7000] via P2P/IPC/read
579
+ jean-zay-iam37:261379:261471 [0] NCCL INFO Connected all trees
580
+ jean-zay-iam37:261379:261471 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
581
+ jean-zay-iam37:261379:261471 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
582
+ jean-zay-iam41:276746:276868 [0] NCCL INFO Connected all trees
583
+ jean-zay-iam41:276746:276868 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
584
+ jean-zay-iam41:276746:276868 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
585
+ jean-zay-iam37:261380:261497 [1] NCCL INFO Connected all trees
586
+ jean-zay-iam37:261380:261497 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
587
+ jean-zay-iam37:261380:261497 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
588
+ jean-zay-iam37:261380:261497 [1] NCCL INFO comm 0x151790002fb0 rank 1 nranks 32 cudaDev 1 busId b000 - Init COMPLETE
589
+ jean-zay-iam37:261379:261471 [0] NCCL INFO comm 0x151f24002fb0 rank 0 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE
590
+ jean-zay-iam37:261382:261501 [3] NCCL INFO comm 0x14a538002fb0 rank 3 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE
591
+ jean-zay-iam37:261381:261500 [2] NCCL INFO comm 0x151028002fb0 rank 2 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE
592
+ jean-zay-iam37:261383:261499 [4] NCCL INFO comm 0x152340002fb0 rank 4 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE
593
+ jean-zay-iam37:261384:261496 [5] NCCL INFO comm 0x14d048002fb0 rank 5 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE
594
+ jean-zay-iam37:261379:261379 [0] NCCL INFO Launch mode Parallel
595
+ jean-zay-iam37:261386:261498 [7] NCCL INFO comm 0x1519b0002fb0 rank 7 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE
596
+ jean-zay-iam37:261385:261506 [6] NCCL INFO comm 0x14bd98002fb0 rank 6 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE
597
+ jean-zay-iam41:276747:276867 [1] NCCL INFO Connected all trees
598
+ jean-zay-iam41:276747:276867 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
599
+ jean-zay-iam41:276747:276867 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
600
+ jean-zay-iam41:276749:276869 [3] NCCL INFO comm 0x14d508002fb0 rank 19 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE
601
+ jean-zay-iam40:289967:290091 [0] NCCL INFO Connected all trees
602
+ jean-zay-iam41:276748:276866 [2] NCCL INFO comm 0x14ae78002fb0 rank 18 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE
603
+ jean-zay-iam40:289967:290091 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
604
+ jean-zay-iam40:289967:290091 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
605
+ jean-zay-iam41:276747:276867 [1] NCCL INFO comm 0x14d928002fb0 rank 17 nranks 32 cudaDev 1 busId b000 - Init COMPLETE
606
+ jean-zay-iam41:276750:276871 [4] NCCL INFO comm 0x146d68002fb0 rank 20 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE
607
+ jean-zay-iam41:276753:276870 [7] NCCL INFO comm 0x1523f8002fb0 rank 23 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE
608
+ jean-zay-iam41:276746:276868 [0] NCCL INFO comm 0x152f60002fb0 rank 16 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE
609
+ jean-zay-iam41:276751:276865 [5] NCCL INFO comm 0x14c788002fb0 rank 21 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE
610
+ jean-zay-iam41:276752:276872 [6] NCCL INFO comm 0x14e538002fb0 rank 22 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE
611
+ jean-zay-iam40:289968:290086 [1] NCCL INFO Connected all trees
612
+ jean-zay-iam40:289968:290086 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
613
+ jean-zay-iam40:289968:290086 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
614
+ jean-zay-iam40:289969:290087 [2] NCCL INFO Connected all trees
615
+ jean-zay-iam40:289969:290087 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512
616
+ jean-zay-iam40:289969:290087 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer
617
+ jean-zay-iam40:289969:290087 [2] NCCL INFO comm 0x154f98002fb0 rank 10 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE
618
+ jean-zay-iam40:289971:290093 [4] NCCL INFO comm 0x1529e8002fb0 rank 12 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE
619
+ jean-zay-iam40:289970:290090 [3] NCCL INFO comm 0x14ee38002fb0 rank 11 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE
620
+ jean-zay-iam40:289973:290089 [6] NCCL INFO comm 0x145bb0002fb0 rank 14 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE
621
+ jean-zay-iam40:289972:290088 [5] NCCL INFO comm 0x14d508002fb0 rank 13 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE
622
+ jean-zay-iam40:289968:290086 [1] NCCL INFO comm 0x14d558002fb0 rank 9 nranks 32 cudaDev 1 busId b000 - Init COMPLETE
623
+ jean-zay-iam40:289974:290092 [7] NCCL INFO comm 0x1494b8002fb0 rank 15 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE
624
+ jean-zay-iam40:289967:290091 [0] NCCL INFO comm 0x14aa40002fb0 rank 8 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE
625
+ ignore me 17
626
+ 6:
627
+ duration: 3.9563 sec
628
+ algo throughput: 16176643777.3540 bps, 16.1766 Gbps
629
+ busbw: 15.6711 Gbps
630
+ ignore me 17
631
+ 7:
632
+ duration: 4.1011 sec
633
+ algo throughput: 15605538666.8284 bps, 15.6055 Gbps
634
+ busbw: 15.1179 Gbps
635
+ ignore me 17
636
+ 5:
637
+ duration: 4.0281 sec
638
+ algo throughput: 15888388696.7879 bps, 15.8884 Gbps
639
+ busbw: 15.3919 Gbps
640
+ ignore me 17
641
+ ignore me 17
642
+ 27:
643
+ duration: 4.1446 sec
644
+ algo throughput: 15441789907.3424 bps, 15.4418 Gbps
645
+ busbw: 14.9592 Gbps
646
+ 4:
647
+ duration: 4.1584 sec
648
+ algo throughput: 15390377253.3963 bps, 15.3904 Gbps
649
+ busbw: 14.9094 Gbps
650
+ ignore me 17
651
+ ignore me 17
652
+ 28:
653
+ duration: 4.0857 sec
654
+ ignore me 17
655
+ algo throughput: 15664581341.3504 bps, 15.6646 Gbps
656
+ busbw: 15.1751 Gbps
657
+ 26:
658
+ duration: 4.1296 sec
659
+ algo throughput: 15497834133.7166 bps, 15.4978 Gbps
660
+ busbw: 15.0135 Gbps
661
+ 3:
662
+ duration: 4.1508 sec
663
+ algo throughput: 15418582053.9969 bps, 15.4186 Gbps
664
+ busbw: 14.9368 Gbps
665
+ ignore me 17
666
+ ignore me 17
667
+ 8:
668
+ duration: 4.2224 sec
669
+ algo throughput: 15157302718.4214 bps, 15.1573 Gbps
670
+ busbw: 14.6836 Gbps
671
+ ignore me 17
672
+ 29:
673
+ duration: 4.0621 sec
674
+ algo throughput: 15755272218.1164 bps, 15.7553 Gbps
675
+ busbw: 15.2629 Gbps
676
+ 25:
677
+ duration: 4.1516 sec
678
+ algo throughput: 15415828590.9963 bps, 15.4158 Gbps
679
+ busbw: 14.9341 Gbps
680
+ ignore me 17
681
+ ignore me 17
682
+ 9:
683
+ duration: 4.0906 sec
684
+ algo throughput: 15645779547.2488 bps, 15.6458 Gbps
685
+ busbw: 15.1568 Gbps
686
+ ignore me 17
687
+ ignore me 17
688
+ ignore me 17
689
+ ignore me 17
690
+ 23:
691
+ duration: 4.1569 sec
692
+ 30:
693
+ duration: 4.0722 sec
694
+ algo throughput: 15716173146.2812 bps, 15.7162 Gbps
695
+ 1:
696
+ duration: 4.0663 sec
697
+ algo throughput: 15396140153.8145 bps, 15.3961 Gbps
698
+ busbw: 14.9150 Gbps
699
+ algo throughput: 15739134214.8659 bps, 15.7391 Gbps
700
+ busbw: 15.2473 Gbps
701
+ busbw: 15.2250 Gbps
702
+ 22:
703
+ duration: 4.0428 sec
704
+ algo throughput: 15830448441.2183 bps, 15.8304 Gbps
705
+ busbw: 15.3357 Gbps
706
+ ignore me 17
707
+ 2:
708
+ duration: 4.1513 sec
709
+ algo throughput: 15416737873.4375 bps, 15.4167 Gbps
710
+ busbw: 14.9350 Gbps
711
+ ignore me 17
712
+ ignore me 17
713
+ 10:
714
+ duration: 4.1135 sec
715
+ 24:
716
+ duration: 4.0613 sec
717
+ algo throughput: 15758479220.2859 bps, 15.7585 Gbps
718
+ busbw: 15.2660 Gbps
719
+ algo throughput: 15558588332.8945 bps, 15.5586 Gbps
720
+ busbw: 15.0724 Gbps
721
+ ignore me 17
722
+ 31:
723
+ duration: 4.1502 sec
724
+ algo throughput: 15420839540.9777 bps, 15.4208 Gbps
725
+ busbw: 14.9389 Gbps
726
+ 21:
727
+ duration: 4.1419 sec
728
+ algo throughput: 15451690470.9343 bps, 15.4517 Gbps
729
+ busbw: 14.9688 Gbps
730
+ ignore me 17
731
+ ignore me 17
732
+ ignore me 17
733
+ 11:
734
+ duration: 4.0492 sec
735
+ algo throughput: 15805693708.4176 bps, 15.8057 Gbps
736
+ 20:
737
+ duration: 4.0993 sec
738
+ algo throughput: 15612440511.8644 bps, 15.6124 Gbps
739
+ busbw: 15.1246 Gbps
740
+ 0:
741
+ duration: 4.0120 sec
742
+ algo throughput: 15952303597.3018 bps, 15.9523 Gbps
743
+ busbw: 15.3118 Gbps
744
+ busbw: 15.4538 Gbps
745
+ ignore me 17
746
+ ignore me 17
747
+ 12:
748
+ duration: 4.1850 sec
749
+ algo throughput: 15292749814.3865 bps, 15.2927 Gbps
750
+ busbw: 14.8149 Gbps
751
+ 19:
752
+ duration: 4.0412 sec
753
+ algo throughput: 15836843924.5534 bps, 15.8368 Gbps
754
+ busbw: 15.3419 Gbps
755
+ ignore me 17
756
+ 13:
757
+ duration: 4.0840 sec
758
+ algo throughput: 15670769926.9476 bps, 15.6708 Gbps
759
+ busbw: 15.1811 Gbps
760
+ ignore me 17
761
+ 18:
762
+ duration: 4.1647 sec
763
+ algo throughput: 15367278261.5983 bps, 15.3673 Gbps
764
+ busbw: 14.8871 Gbps
765
+ ignore me 17
766
+ 14:
767
+ duration: 4.0438 sec
768
+ algo throughput: 15826582974.8276 bps, 15.8266 Gbps
769
+ busbw: 15.3320 Gbps
770
+ ignore me 17
771
+ ignore me 17
772
+ 17:
773
+ duration: 4.1553 sec
774
+ algo throughput: 15401946302.4121 bps, 15.4019 Gbps
775
+ 15:
776
+ duration: 4.1608 sec
777
+ algo throughput: 15381558817.4705 bps, 15.3816 Gbps
778
+ busbw: 14.9206 Gbps
779
+ busbw: 14.9009 Gbps
780
+ ignore me 17
781
+ 16:
782
+ duration: 4.0474 sec
783
+ algo throughput: 15812815660.2083 bps, 15.8128 Gbps
784
+ busbw: 15.3187 Gbps
785
+ ignore me 555
786
+ 23:
787
+ duration: 1.5186 sec
788
+ algo throughput: 42143980222.5332 bps, 42.1440 Gbps
789
+ ignore me 555
790
+ busbw: 40.8270 Gbps
791
+ 9:
792
+ duration: 1.5187 sec
793
+ algo throughput: 42140589448.6002 bps, 42.1406 Gbps
794
+ busbw: 40.8237 Gbps
795
+ ignore me 555
796
+ 22:
797
+ duration: 1.5187 sec
798
+ algo throughput: 42140378571.5530 bps, 42.1404 Gbps
799
+ ignore me 555
800
+ busbw: 40.8235 Gbps
801
+ 24:
802
+ duration: 1.5187 sec
803
+ algo throughput: 42142240285.3888 bps, 42.1422 Gbps
804
+ busbw: 40.8253 Gbps
805
+ ignore me 555
806
+ 7:
807
+ duration: 1.5199 sec
808
+ algo throughput: 42108029847.7049 bps, 42.1080 Gbps
809
+ busbw: 40.7922 Gbps
810
+ ignore me 555
811
+ ignore me 555
812
+ 10:
813
+ duration: 1.5188 sec
814
+ algo throughput: 42138916267.0821 bps, 42.1389 Gbps
815
+ busbw: 40.8221 Gbps
816
+ 8:
817
+ duration: 1.5192 sec
818
+ algo throughput: 42126338602.2545 bps, 42.1263 Gbps
819
+ busbw: 40.8099 Gbps
820
+ ignore me 555
821
+ ignore me 555
822
+ ignore me 555
823
+ 21:
824
+ duration: 1.5188 sec
825
+ algo throughput: 42139898494.4063 bps, 42.1399 Gbps
826
+ busbw: 40.8230 Gbps
827
+ 25:
828
+ duration: 1.5192 sec
829
+ algo throughput: 42127092502.8457 bps, 42.1271 Gbps
830
+ busbw: 40.8106 Gbps
831
+ 6:
832
+ duration: 1.5202 sec
833
+ algo throughput: 42099423136.7009 bps, 42.0994 Gbps
834
+ busbw: 40.7838 Gbps
835
+ ignore me 555
836
+ 11:
837
+ duration: 1.5187 sec
838
+ algo throughput: 42141289163.4721 bps, 42.1413 Gbps
839
+ ignore me 555
840
+ busbw: 40.8244 Gbps
841
+ 20:
842
+ duration: 1.5188 sec
843
+ algo throughput: 42139687792.2383 bps, 42.1397 Gbps
844
+ busbw: 40.8228 Gbps
845
+ ignore me 555
846
+ 26:
847
+ duration: 1.5197 sec
848
+ algo throughput: 42113294024.4995 bps, 42.1133 Gbps
849
+ busbw: 40.7973 Gbps
850
+ ignore me 555
851
+ ignore me 555
852
+ 5:
853
+ duration: 1.5202 sec
854
+ algo throughput: 42100022978.8723 bps, 42.1000 Gbps
855
+ busbw: 40.7844 Gbps
856
+ 12:
857
+ duration: 1.5187 sec
858
+ algo throughput: 42141483180.7297 bps, 42.1415 Gbps
859
+ busbw: 40.8246 Gbps
860
+ ignore me 555
861
+ 19:
862
+ duration: 1.5188 sec
863
+ algo throughput: 42139070669.3367 bps, 42.1391 Gbps
864
+ busbw: 40.8222 Gbps
865
+ ignore me 555
866
+ ignore me 555
867
+ 13:
868
+ duration: 1.5187 sec
869
+ algo throughput: 42140413754.7281 bps, 42.1404 Gbps
870
+ 27:
871
+ duration: 1.5202 sec
872
+ algo throughput: 42099139976.4359 bps, 42.0991 Gbps
873
+ busbw: 40.7835 Gbps
874
+ busbw: 40.8235 Gbps
875
+ ignore me 555
876
+ 4:
877
+ duration: 1.5203 sec
878
+ algo throughput: 42097969076.0652 bps, 42.0980 Gbps
879
+ busbw: 40.7824 Gbps
880
+ ignore me 555
881
+ 18:
882
+ duration: 1.5187 sec
883
+ algo throughput: 42141134996.9228 bps, 42.1411 Gbps
884
+ busbw: 40.8242 Gbps
885
+ ignore me 555
886
+ 28:
887
+ duration: 1.5203 sec
888
+ algo throughput: 42097422955.6261 bps, 42.0974 Gbps
889
+ ignore me 555
890
+ busbw: 40.7819 Gbps
891
+ ignore me 555
892
+ 14:
893
+ duration: 1.5188 sec
894
+ algo throughput: 42139893361.7641 bps, 42.1399 Gbps
895
+ busbw: 40.8230 Gbps
896
+ 3:
897
+ duration: 1.5203 sec
898
+ algo throughput: 42097598433.0412 bps, 42.0976 Gbps
899
+ busbw: 40.7820 Gbps
900
+ ignore me 555
901
+ 17:
902
+ duration: 1.5188 sec
903
+ algo throughput: 42139267495.6574 bps, 42.1393 Gbps
904
+ busbw: 40.8224 Gbps
905
+ ignore me 555
906
+ ignore me 555
907
+ 29:
908
+ duration: 1.5203 sec
909
+ algo throughput: 42096144082.6273 bps, 42.0961 Gbps
910
+ ignore me 555
911
+ busbw: 40.7806 Gbps
912
+ 15:
913
+ duration: 1.5188 sec
914
+ algo throughput: 42137175969.6847 bps, 42.1372 Gbps
915
+ ignore me 555
916
+ busbw: 40.8204 Gbps
917
+ 16:
918
+ duration: 1.5186 sec
919
+ algo throughput: 42144770940.2506 bps, 42.1448 Gbps
920
+ busbw: 40.8277 Gbps
921
+ 2:
922
+ duration: 1.5201 sec
923
+ algo throughput: 42101391688.1200 bps, 42.1014 Gbps
924
+ busbw: 40.7857 Gbps
925
+ ignore me 555
926
+ ignore me 555
927
+ 30:
928
+ duration: 1.5203 sec
929
+ algo throughput: 42096228974.3786 bps, 42.0962 Gbps
930
+ busbw: 40.7807 Gbps
931
+ 1:
932
+ duration: 1.5204 sec
933
+ algo throughput: 42095494315.5608 bps, 42.0955 Gbps
934
+ busbw: 40.7800 Gbps
935
+ ignore me 555
936
+ 31:
937
+ duration: 1.5203 sec
938
+ algo throughput: 42096577970.2344 bps, 42.0966 Gbps
939
+ busbw: 40.7811 Gbps
940
+ ignore me 555
941
+ 0:
942
+ duration: 1.5203 sec
943
+ algo throughput: 42097401467.1174 bps, 42.0974 Gbps
944
+ busbw: 40.7819 Gbps
945
+ ignore me 17760
946
+ 19:
947
+ duration: 1.5271 sec
948
+ algo throughput: 41910600634.9022 bps, 41.9106 Gbps
949
+ busbw: 40.6009 Gbps
950
+ ignore me 17760
951
+ ignore me 17760
952
+ 18:
953
+ duration: 1.5270 sec
954
+ algo throughput: 41911582289.7142 bps, 41.9116 Gbps
955
+ busbw: 40.6018 Gbps
956
+ 20:
957
+ duration: 1.5276 sec
958
+ algo throughput: 41894987422.3905 bps, 41.8950 Gbps
959
+ busbw: 40.5858 Gbps
960
+ ignore me 17760
961
+ ignore me 17760
962
+ 17:
963
+ duration: 1.5270 sec
964
+ algo throughput: 41913406576.8859 bps, 41.9134 Gbps
965
+ busbw: 40.6036 Gbps
966
+ ignore me 17760
967
+ 21:
968
+ duration: 1.5280 sec
969
+ algo throughput: 41885069299.4918 bps, 41.8851 Gbps
970
+ busbw: 40.5762 Gbps
971
+ ignore me 17760
972
+ 14:
973
+ duration: 1.5272 sec
974
+ algo throughput: 41907314947.6113 bps, 41.9073 Gbps
975
+ busbw: 40.5977 Gbps
976
+ 15:
977
+ duration: 1.5270 sec
978
+ algo throughput: 41913242272.3447 bps, 41.9132 Gbps
979
+ busbw: 40.6035 Gbps
980
+ ignore me 17760
981
+ ignore me 17760
982
+ 13:
983
+ duration: 1.5277 sec
984
+ algo throughput: 41893273876.8880 bps, 41.8933 Gbps
985
+ busbw: 40.5841 Gbps
986
+ ignore me 17760
987
+ 16:
988
+ duration: 1.5271 sec
989
+ algo throughput: 41909230280.3461 bps, 41.9092 Gbps
990
+ busbw: 40.5996 Gbps
991
+ 22:
992
+ duration: 1.5286 sec
993
+ algo throughput: 41869319488.2197 bps, 41.8693 Gbps
994
+ busbw: 40.5609 Gbps
995
+ ignore me 17760
996
+ ignore me 17760
997
+ 23:
998
+ duration: 1.5289 sec
999
+ algo throughput: 41861290350.4216 bps, 41.8613 Gbps
1000
+ 12:
1001
+ duration: 1.5281 sec
1002
+ algo throughput: 41882850453.1701 bps, 41.8829 Gbps
1003
+ busbw: 40.5740 Gbps
1004
+ busbw: 40.5531 Gbps
1005
+ ignore me 17760
1006
+ 11:
1007
+ duration: 1.5286 sec
1008
+ algo throughput: 41868966830.1641 bps, 41.8690 Gbps
1009
+ busbw: 40.5606 Gbps
1010
+ ignore me 17760
1011
+ ignore me 17760
1012
+ 24:
1013
+ duration: 1.5291 sec
1014
+ algo throughput: 41854797523.2289 bps, 41.8548 Gbps
1015
+ 10:
1016
+ duration: 1.5290 sec
1017
+ algo throughput: 41858049187.4726 bps, 41.8580 Gbps
1018
+ busbw: 40.5468 Gbps
1019
+ busbw: 40.5500 Gbps
1020
+ ignore me 17760
1021
+ 25:
1022
+ duration: 1.5291 sec
1023
+ algo throughput: 41855697296.6685 bps, 41.8557 Gbps
1024
+ busbw: 40.5477 Gbps
1025
+ ignore me 17760
1026
+ ignore me 17760
1027
+ ignore me 17760
1028
+ 9:
1029
+ duration: 1.5296 sec
1030
+ algo throughput: 41841767653.6339 bps, 41.8418 Gbps
1031
+ busbw: 40.5342 Gbps
1032
+ 6:
1033
+ duration: 1.5292 sec
1034
+ algo throughput: 41851931325.8954 bps, 41.8519 Gbps
1035
+ busbw: 40.5441 Gbps
1036
+ 7:
1037
+ duration: 1.5294 sec
1038
+ algo throughput: 41846364025.0241 bps, 41.8464 Gbps
1039
+ busbw: 40.5387 Gbps
1040
+ ignore me 17760
1041
+ 26:
1042
+ duration: 1.5290 sec
1043
+ algo throughput: 41856070811.5191 bps, 41.8561 Gbps
1044
+ busbw: 40.5481 Gbps
1045
+ ignore me 17760
1046
+ ignore me 17760
1047
+ 5:
1048
+ duration: 1.5291 sec
1049
+ algo throughput: 41855875143.2076 bps, 41.8559 Gbps
1050
+ busbw: 40.5479 Gbps
1051
+ 8:
1052
+ duration: 1.5295 sec
1053
+ algo throughput: 41843741534.2125 bps, 41.8437 Gbps
1054
+ busbw: 40.5361 Gbps
1055
+ ignore me 17760
1056
+ 27:
1057
+ duration: 1.5290 sec
1058
+ algo throughput: 41856588048.6577 bps, 41.8566 Gbps
1059
+ busbw: 40.5486 Gbps
1060
+ ignore me 17760
1061
+ 4:
1062
+ duration: 1.5290 sec
1063
+ algo throughput: 41856245346.9914 bps, 41.8562 Gbps
1064
+ busbw: 40.5482 Gbps
1065
+ ignore me 17760
1066
+ 28:
1067
+ duration: 1.5290 sec
1068
+ algo throughput: 41858071525.4799 bps, 41.8581 Gbps
1069
+ busbw: 40.5500 Gbps
1070
+ ignore me 17760
1071
+ 3:
1072
+ duration: 1.5290 sec
1073
+ algo throughput: 41857294677.8322 bps, 41.8573 Gbps
1074
+ busbw: 40.5493 Gbps
1075
+ ignore me 17760
1076
+ 29:
1077
+ duration: 1.5289 sec
1078
+ algo throughput: 41859219678.2562 bps, 41.8592 Gbps
1079
+ busbw: 40.5511 Gbps
1080
+ ignore me 17760
1081
+ 2:
1082
+ duration: 1.5289 sec
1083
+ algo throughput: 41859941759.2278 bps, 41.8599 Gbps
1084
+ busbw: 40.5518 Gbps
1085
+ ignore me 17760
1086
+ 30:
1087
+ duration: 1.5289 sec
1088
+ algo throughput: 41858890268.6218 bps, 41.8589 Gbps
1089
+ busbw: 40.5508 Gbps
1090
+ ignore me 17760
1091
+ 1:
1092
+ duration: 1.5290 sec
1093
+ algo throughput: 41856634528.5093 bps, 41.8566 Gbps
1094
+ busbw: 40.5486 Gbps
1095
+ ignore me 17760
1096
+ 31:
1097
+ duration: 1.5290 sec
1098
+ algo throughput: 41858450586.8372 bps, 41.8585 Gbps
1099
+ busbw: 40.5504 Gbps
1100
+ ignore me 17760
1101
+ 0:
1102
+ duration: 1.5289 sec
1103
+ algo throughput: 41860374323.0033 bps, 41.8604 Gbps
1104
+ busbw: 40.5522 Gbps
1105
+ ignore me 568326
1106
+ 18:
1107
+ duration: 1.5292 sec
1108
+ algo throughput: 41851192689.6061 bps, 41.8512 Gbps
1109
+ busbw: 40.5433 Gbps
1110
+ ignore me 568326
1111
+ 19:
1112
+ duration: 1.5296 sec
1113
+ algo throughput: 41840982602.8527 bps, 41.8410 Gbps
1114
+ busbw: 40.5335 Gbps
1115
+ ignore me 568326
1116
+ 17:
1117
+ duration: 1.5292 sec
1118
+ algo throughput: 41851389273.1359 bps, 41.8514 Gbps
1119
+ busbw: 40.5435 Gbps
1120
+ ignore me 568326
1121
+ ignore me 568326
1122
+ ignore me 568326
1123
+ 14:
1124
+ duration: 1.5293 sec
1125
+ algo throughput: 41850546358.8408 bps, 41.8505 Gbps
1126
+ busbw: 40.5427 Gbps
1127
+ 20:
1128
+ duration: 1.5296 sec
1129
+ algo throughput: 41841711605.3523 bps, 41.8417 Gbps
1130
+ 15:
1131
+ duration: 1.5292 sec
1132
+ algo throughput: 41850900844.4322 bps, 41.8509 Gbps
1133
+ busbw: 40.5342 Gbps
1134
+ busbw: 40.5431 Gbps
1135
+ ignore me 568326
1136
+ ignore me 568326
1137
+ 13:
1138
+ duration: 1.5293 sec
1139
+ 16:
1140
+ duration: 1.5292 sec
1141
+ algo throughput: 41851732548.4344 bps, 41.8517 Gbps
1142
+ busbw: 40.5439 Gbps
1143
+ algo throughput: 41849491619.2404 bps, 41.8495 Gbps
1144
+ busbw: 40.5417 Gbps
1145
+ ignore me 568326
1146
+ 21:
1147
+ duration: 1.5296 sec
1148
+ algo throughput: 41841051125.8787 bps, 41.8411 Gbps
1149
+ busbw: 40.5335 Gbps
1150
+ ignore me 568326
1151
+ 12:
1152
+ duration: 1.5293 sec
1153
+ algo throughput: 41848837733.7002 bps, 41.8488 Gbps
1154
+ busbw: 40.5411 Gbps
1155
+ ignore me 568326
1156
+ ignore me 568326
1157
+ 22:
1158
+ duration: 1.5295 sec
1159
+ algo throughput: 41842526390.1754 bps, 41.8425 Gbps
1160
+ 11:
1161
+ duration: 1.5292 sec
1162
+ algo throughput: 41851402077.7964 bps, 41.8514 Gbps
1163
+ busbw: 40.5349 Gbps
1164
+ busbw: 40.5435 Gbps
1165
+ ignore me 568326
1166
+ ignore me 568326
1167
+ 25:
1168
+ duration: 1.5289 sec
1169
+ algo throughput: 41860057899.5817 bps, 41.8601 Gbps
1170
+ busbw: 40.5519 Gbps
1171
+ 23:
1172
+ duration: 1.5296 sec
1173
+ algo throughput: 41841328471.6004 bps, 41.8413 Gbps
1174
+ busbw: 40.5338 Gbps
1175
+ ignore me 568326
1176
+ ignore me 568326
1177
+ 10:
1178
+ duration: 1.5293 sec
1179
+ algo throughput: 41850492064.7668 bps, 41.8505 Gbps
1180
+ ignore me 568326
1181
+ busbw: 40.5427 Gbps
1182
+ 26:
1183
+ duration: 1.5289 sec
1184
+ algo throughput: 41861009756.5066 bps, 41.8610 Gbps
1185
+ busbw: 40.5529 Gbps
1186
+ 24:
1187
+ duration: 1.5293 sec
1188
+ algo throughput: 41848595317.3039 bps, 41.8486 Gbps
1189
+ busbw: 40.5408 Gbps
1190
+ ignore me 568326
1191
+ 5:
1192
+ duration: 1.5289 sec
1193
+ algo throughput: 41860676073.0211 bps, 41.8607 Gbps
1194
+ ignore me 568326
1195
+ busbw: 40.5525 Gbps
1196
+ ignore me 568326
1197
+ 27:
1198
+ duration: 1.5288 sec
1199
+ algo throughput: 41861710376.5379 bps, 41.8617 Gbps
1200
+ busbw: 40.5535 Gbps
1201
+ ignore me 568326
1202
+ 6:
1203
+ duration: 1.5292 sec
1204
+ algo throughput: 41852910485.9393 bps, 41.8529 Gbps
1205
+ busbw: 40.5450 Gbps
1206
+ ignore me 568326
1207
+ 9:
1208
+ duration: 1.5292 sec
1209
+ algo throughput: 41850873996.3972 bps, 41.8509 Gbps
1210
+ ignore me 568326
1211
+ busbw: 40.5430 Gbps
1212
+ 4:
1213
+ duration: 1.5288 sec
1214
+ algo throughput: 41861534698.9598 bps, 41.8615 Gbps
1215
+ busbw: 40.5534 Gbps
1216
+ 7:
1217
+ duration: 1.5293 sec
1218
+ algo throughput: 41849369678.9657 bps, 41.8494 Gbps
1219
+ busbw: 40.5416 Gbps
1220
+ ignore me 568326
1221
+ 28:
1222
+ duration: 1.5289 sec
1223
+ algo throughput: 41861383911.2504 bps, 41.8614 Gbps
1224
+ busbw: 40.5532 Gbps
1225
+ ignore me 568326
1226
+ ignore me 568326
1227
+ 8:
1228
+ duration: 1.5293 sec
1229
+ algo throughput: 41848441035.8316 bps, 41.8484 Gbps
1230
+ 3:
1231
+ duration: 1.5289 sec
1232
+ algo throughput: 41861481198.7633 bps, 41.8615 Gbps
1233
+ busbw: 40.5533 Gbps
1234
+ busbw: 40.5407 Gbps
1235
+ ignore me 568326
1236
+ 29:
1237
+ duration: 1.5289 sec
1238
+ algo throughput: 41861138665.5933 bps, 41.8611 Gbps
1239
+ busbw: 40.5530 Gbps
1240
+ ignore me 568326
1241
+ 2:
1242
+ duration: 1.5289 sec
1243
+ algo throughput: 41861040340.5475 bps, 41.8610 Gbps
1244
+ busbw: 40.5529 Gbps
1245
+ ignore me 568326
1246
+ 30:
1247
+ duration: 1.5289 sec
1248
+ algo throughput: 41861393521.7231 bps, 41.8614 Gbps
1249
+ ignore me 568326
1250
+ busbw: 40.5532 Gbps
1251
+ 1:
1252
+ duration: 1.5288 sec
1253
+ algo throughput: 41863250360.5825 bps, 41.8633 Gbps
1254
+ busbw: 40.5550 Gbps
1255
+ ignore me 568326
1256
+ 31:
1257
+ duration: 1.5289 sec
1258
+ algo throughput: 41860930490.0206 bps, 41.8609 Gbps
1259
+ busbw: 40.5528 Gbps
1260
+ ignore me 568326
1261
+ 0:
1262
+ duration: 1.5289 sec
1263
+ algo throughput: 41861381313.3954 bps, 41.8614 Gbps
1264
+ busbw: 40.5532 Gbps
1265
+ ignore me 18186434
1266
+ 18:
1267
+ duration: 1.5304 sec
1268
+ algo throughput: 41819308451.5824 bps, 41.8193 Gbps
1269
+ busbw: 40.5125 Gbps
1270
+ ignore me 18186434
1271
+ 19:
1272
+ duration: 1.5304 sec
1273
+ algo throughput: 41819374415.9696 bps, 41.8194 Gbps
1274
+ busbw: 40.5125 Gbps
1275
+ ignore me 18186434
1276
+ 17:
1277
+ duration: 1.5304 sec
1278
+ algo throughput: 41819400154.7344 bps, 41.8194 Gbps
1279
+ busbw: 40.5125 Gbps
1280
+ ignore me 18186434
1281
+ ignore me 18186434
1282
+ 15:
1283
+ duration: 1.5303 sec
1284
+ algo throughput: 41821175681.0869 bps, 41.8212 Gbps
1285
+ 20:
1286
+ duration: 1.5304 sec
1287
+ algo throughput: 41820265560.0101 bps, 41.8203 Gbps
1288
+ busbw: 40.5134 Gbps
1289
+ busbw: 40.5143 Gbps
1290
+ ignore me 18186434
1291
+ 14:
1292
+ duration: 1.5305 sec
1293
+ algo throughput: 41817412474.7738 bps, 41.8174 Gbps
1294
+ busbw: 40.5106 Gbps
1295
+ ignore me 18186434
1296
+ 16:
1297
+ duration: 1.5304 sec
1298
+ algo throughput: 41820405171.5425 bps, 41.8204 Gbps
1299
+ busbw: 40.5135 Gbps
1300
+ ignore me 18186434
1301
+ ignore me 18186434
1302
+ 21:
1303
+ duration: 1.5304 sec
1304
+ algo throughput: 41820211341.2948 bps, 41.8202 Gbps
1305
+ busbw: 40.5133 Gbps
1306
+ 13:
1307
+ duration: 1.5305 sec
1308
+ algo throughput: 41815893542.3173 bps, 41.8159 Gbps
1309
+ busbw: 40.5091 Gbps
1310
+ ignore me 18186434
1311
+ ignore me 18186434
1312
+ 22:
1313
+ duration: 1.5304 sec
1314
+ algo throughput: 41819993958.8392 bps, 41.8200 Gbps
1315
+ busbw: 40.5131 Gbps
1316
+ 12:
1317
+ duration: 1.5305 sec
1318
+ algo throughput: 41816988451.4211 bps, 41.8170 Gbps
1319
+ busbw: 40.5102 Gbps
1320
+ ignore me 18186434
1321
+ 23:
1322
+ duration: 1.5304 sec
1323
+ algo throughput: 41820013685.7934 bps, 41.8200 Gbps
1324
+ busbw: 40.5131 Gbps
1325
+ ignore me 18186434
1326
+ 11:
1327
+ duration: 1.5306 sec
1328
+ algo throughput: 41813631070.6557 bps, 41.8136 Gbps
1329
+ busbw: 40.5070 Gbps
1330
+ ignore me 18186434
1331
+ 10:
1332
+ duration: 1.5306 sec
1333
+ algo throughput: 41813136230.6469 bps, 41.8131 Gbps
1334
+ busbw: 40.5065 Gbps
1335
+ ignore me 18186434
1336
+ 24:
1337
+ duration: 1.5306 sec
1338
+ algo throughput: 41813362805.8615 bps, 41.8134 Gbps
1339
+ busbw: 40.5067 Gbps
1340
+ ignore me 18186434
1341
+ ignore me 18186434
1342
+ 9:
1343
+ duration: 1.5306 sec
1344
+ algo throughput: 41814612837.9065 bps, 41.8146 Gbps
1345
+ 25:
1346
+ duration: 1.5311 sec
1347
+ algo throughput: 41801050732.9013 bps, 41.8011 Gbps
1348
+ busbw: 40.4948 Gbps
1349
+ busbw: 40.5079 Gbps
1350
+ ignore me 18186434
1351
+ ignore me 18186434
1352
+ 6:
1353
+ duration: 1.5307 sec
1354
+ algo throughput: 41811611108.9466 bps, 41.8116 Gbps
1355
+ busbw: 40.5050 Gbps
1356
+ 7:
1357
+ duration: 1.5305 sec
1358
+ algo throughput: 41815091867.5771 bps, 41.8151 Gbps
1359
+ busbw: 40.5084 Gbps
1360
+ ignore me 18186434
1361
+ 8:
1362
+ duration: 1.5304 sec
1363
+ algo throughput: 41818224707.1108 bps, 41.8182 Gbps
1364
+ busbw: 40.5114 Gbps
1365
+ ignore me 18186434
1366
+ 26:
1367
+ duration: 1.5311 sec
1368
+ algo throughput: 41799543931.1436 bps, 41.7995 Gbps
1369
+ busbw: 40.4933 Gbps
1370
+ ignore me 18186434
1371
+ 5:
1372
+ duration: 1.5311 sec
1373
+ algo throughput: 41800540982.4688 bps, 41.8005 Gbps
1374
+ busbw: 40.4943 Gbps
1375
+ ignore me 18186434
1376
+ 27:
1377
+ duration: 1.5311 sec
1378
+ algo throughput: 41798734639.3871 bps, 41.7987 Gbps
1379
+ busbw: 40.4925 Gbps
1380
+ ignore me 18186434
1381
+ 4:
1382
+ duration: 1.5311 sec
1383
+ algo throughput: 41799893567.7921 bps, 41.7999 Gbps
1384
+ busbw: 40.4936 Gbps
1385
+ ignore me 18186434
1386
+ 28:
1387
+ duration: 1.5312 sec
1388
+ algo throughput: 41798021113.2911 bps, 41.7980 Gbps
1389
+ busbw: 40.4918 Gbps
1390
+ ignore me 18186434
1391
+ 3:
1392
+ duration: 1.5311 sec
1393
+ algo throughput: 41799656984.3057 bps, 41.7997 Gbps
1394
+ busbw: 40.4934 Gbps
1395
+ ignore me 18186434
1396
+ 29:
1397
+ duration: 1.5312 sec
1398
+ ignore me 18186434
1399
+ algo throughput: 41797483455.9485 bps, 41.7975 Gbps
1400
+ busbw: 40.4913 Gbps
1401
+ 2:
1402
+ duration: 1.5312 sec
1403
+ algo throughput: 41797889916.8612 bps, 41.7979 Gbps
1404
+ busbw: 40.4917 Gbps
1405
+ ignore me 18186434
1406
+ 30:
1407
+ duration: 1.5312 sec
1408
+ algo throughput: 41797399459.7577 bps, 41.7974 Gbps
1409
+ busbw: 40.4912 Gbps
1410
+ ignore me 18186434
1411
+ 1:
1412
+ duration: 1.5312 sec
1413
+ algo throughput: 41796838922.8479 bps, 41.7968 Gbps
1414
+ busbw: 40.4907 Gbps
1415
+ ignore me 18186434
1416
+ 31:
1417
+ duration: 1.5312 sec
1418
+ algo throughput: 41798535248.2715 bps, 41.7985 Gbps
1419
+ busbw: 40.4923 Gbps
1420
+ ignore me 18186434
1421
+ 0:
1422
+ duration: 1.5312 sec
1423
+ algo throughput: 41797155891.1448 bps, 41.7972 Gbps
1424
+ busbw: 40.4910 Gbps
experiments/bandwidth/all_reduce_bench.py ADDED
@@ -0,0 +1,66 @@
+ # python -m torch.distributed.run --nproc_per_node=2 all_reduce_bench.py
+
+ import argparse
+ import fcntl
+ import os
+ import socket
+ import time
+ import torch
+ import torch.distributed as dist
+
+ # note: this benchmark doesn't care how many gpus per node one has
+
+ TRIALS = 5
+
+ N = 500000
+ M = 2000
+
+ def printflock(*msgs):
+     """ print under an exclusive file lock so output from concurrent processes doesn't interleave """
+     with open(__file__, "r") as fh:
+         fcntl.flock(fh, fcntl.LOCK_EX)
+         try:
+             print(*msgs)
+         finally:
+             fcntl.flock(fh, fcntl.LOCK_UN)
+
+ def timed_allreduce(mat, id):
+     pre = time.perf_counter()
+     dist.all_reduce(mat)
+     printflock(f"ignore me {int(mat[0][0])}")  # required due to lazy evaluation: reading the result forces a sync so the timing below is real
+     duration = time.perf_counter() - pre
+     tput = ((M*N*4*2)/duration)*8  # *2 is for send + receive, *8 for gigabits/second
+     size = M * N * 4  # 4 is fp32
+     n = dist.get_world_size()
+     busbw = (size / duration) * (2 * (n - 1) / n) * 8
+     printflock(f"{id}:\n",
+                f"duration: {duration:.4f} sec\n",
+                f"algo throughput: {tput:.4f} bps, {tput/1e9:.4f} Gbps\n",
+                f"busbw: {busbw / 1e9:.4f} Gbps"
+     )
+
+ def run(local_rank):
+     hostname = socket.gethostname()
+     id = f"{hostname}:{local_rank}"
+     global_rank = dist.get_rank()
+
+     printflock(f"{id} data size: {M*N*4/1e9} GB")
+     mat = torch.rand(N, M, dtype=torch.float32).cuda(local_rank)
+
+     for i in range(TRIALS):
+         dist.barrier()
+         if global_rank == 0:
+             print(f"\n\n\n-----------trial-{i}----------------")
+         timed_allreduce(mat, id)
+
+ def init_processes(local_rank, fn, backend='nccl'):
+     torch.cuda.set_device(local_rank)
+     dist.init_process_group(backend)
+     fn(local_rank)
+
+
+ if __name__ == "__main__":
+     rank = int(os.environ["LOCAL_RANK"])
+     printflock("local_rank: %d" % rank)
+     init_processes(local_rank=rank, fn=run)
+
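Note on the busbw figures in the logs: timed_allreduce reports the per-rank payload rate corrected by the usual all-reduce factor 2(n-1)/n (the convention documented for nccl-tests), with n the world size and S = N * M * 4 bytes = 500000 * 2000 * 4 B = 4 GB the fp32 payload:

\[ \mathrm{busbw} = \frac{S}{t} \cdot \frac{2(n-1)}{n} \cdot 8 \ \text{bits/s} \]

As a rough sanity check against the 4-node / 32-rank log above: t ≈ 1.53 s gives 4e9 / 1.53 * (2 * 31 / 32) * 8 ≈ 40.5 Gbps, which matches the reported busbw lines, while the "algo throughput" line is simply 2S/t * 8 ≈ 41.9 Gbps.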
experiments/bandwidth/n16_32gb_all_reduce_bench.txt ADDED
The diff for this file is too large to render.
 
experiments/bandwidth/n1_16gb_all_reduce_bench.txt ADDED
@@ -0,0 +1,264 @@
1
+ export NCCL_DEBUG=info
2
+ python -m torch.distributed.launch --nproc_per_node=4 all_reduce_bench.py
3
+
4
+ *****************************************
5
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
6
+ *****************************************
7
+ local_rank: 2
8
+ local_rank: 3
9
+ local_rank: 1
10
+ local_rank: 0
11
+ 0 data size: 4.0 GB
12
+ 2 data size: 4.0 GB
13
+ 1 data size: 4.0 GB
14
+ 3 data size: 4.0 GB
15
+ r10i4n8:38029:38029 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0>
16
+ r10i4n8:38029:38029 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
17
+ r10i4n8:38029:38029 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0>
18
+ r10i4n8:38029:38029 [0] NCCL INFO Using network IB
19
+ NCCL version 2.7.8+cuda10.2
20
+ r10i4n8:38030:38030 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0>
21
+ r10i4n8:38030:38030 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
22
+ r10i4n8:38030:38030 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0>
23
+ r10i4n8:38030:38030 [1] NCCL INFO Using network IB
24
+ r10i4n8:38032:38032 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0>
25
+ r10i4n8:38032:38032 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
26
+ r10i4n8:38031:38031 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0>
27
+ r10i4n8:38031:38031 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
28
+ r10i4n8:38032:38032 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0>
29
+ r10i4n8:38032:38032 [3] NCCL INFO Using network IB
30
+ r10i4n8:38031:38031 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0>
31
+ r10i4n8:38031:38031 [2] NCCL INFO Using network IB
32
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 00/12 : 0 1 2 3
33
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 01/12 : 0 1 3 2
34
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 02/12 : 0 2 3 1
35
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 03/12 : 0 2 1 3
36
+ r10i4n8:38030:38071 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
37
+ r10i4n8:38032:38077 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
38
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 04/12 : 0 3 1 2
39
+ r10i4n8:38031:38081 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
40
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 05/12 : 0 3 2 1
41
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 06/12 : 0 1 2 3
42
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 07/12 : 0 1 3 2
43
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 08/12 : 0 2 3 1
44
+ r10i4n8:38030:38071 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1
45
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 09/12 : 0 2 1 3
46
+ r10i4n8:38032:38077 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1
47
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 10/12 : 0 3 1 2
48
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 11/12 : 0 3 2 1
49
+ r10i4n8:38031:38081 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1
50
+ r10i4n8:38030:38071 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff00,000fffff
51
+ r10i4n8:38032:38077 [3] NCCL INFO Setting affinity for GPU 3 to ffff,f00000ff,fff00000
52
+ r10i4n8:38031:38081 [2] NCCL INFO Setting affinity for GPU 2 to ffff,f00000ff,fff00000
53
+ r10i4n8:38029:38066 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
54
+ r10i4n8:38029:38066 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1
55
+ r10i4n8:38029:38066 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff
56
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC
57
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC
58
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC
59
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC
60
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC
61
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC
62
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC
63
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC
64
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC
65
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC
66
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC
67
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC
68
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC
69
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC
70
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC
71
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC
72
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC
73
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC
74
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC
75
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC
76
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC
77
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC
78
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC
79
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC
80
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC
81
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC
82
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC
83
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC
84
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC
85
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC
86
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC
87
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC
88
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC
89
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC
90
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC
91
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC
92
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC
93
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC
94
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC
95
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC
96
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC
97
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC
98
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC
99
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC
100
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC
101
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC
102
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC
103
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC
104
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC
105
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC
106
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC
107
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC
108
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC
109
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC
110
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC
111
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC
112
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC
113
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC
114
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC
115
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC
116
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC
117
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC
118
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC
119
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC
120
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC
121
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC
122
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC
123
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC
124
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC
125
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC
126
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC
127
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC
128
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC
129
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC
130
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC
131
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC
132
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC
133
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC
134
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC
135
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC
136
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC
137
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC
138
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC
139
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC
140
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC
141
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC
142
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC
143
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC
144
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC
145
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC
146
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC
147
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC
148
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC
149
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC
150
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC
151
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC
152
+ r10i4n8:38031:38081 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC
153
+ r10i4n8:38030:38071 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC
154
+ r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC
155
+ r10i4n8:38029:38066 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC
156
+ r10i4n8:38030:38071 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
157
+ r10i4n8:38030:38071 [1] NCCL INFO comm 0x14dbb0001060 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE
158
+ r10i4n8:38031:38081 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
159
+ r10i4n8:38031:38081 [2] NCCL INFO comm 0x150950001060 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE
160
+ r10i4n8:38032:38077 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
161
+ r10i4n8:38032:38077 [3] NCCL INFO comm 0x14ccd8001060 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE
162
+ r10i4n8:38029:38066 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
163
+ r10i4n8:38029:38066 [0] NCCL INFO comm 0x149bac001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE
164
+ r10i4n8:38029:38029 [0] NCCL INFO Launch mode Parallel
165
+ ignore me 1
166
+ ignore me 1
167
+ ignore me 1
168
+ 0:
169
+ duration: 0.6633 sec
170
+ algo throughput: 96488131490.3540 bps, 96.4881 Gbps
171
+ busbw: 72.3661 Gbps
172
+ 1:
173
+ duration: 0.4507 sec
174
+ algo throughput: 142007505620.8443 bps, 142.0075 Gbps
175
+ busbw: 106.5056 Gbps
176
+ 2:
177
+ duration: 0.4203 sec
178
+ algo throughput: 152274131784.9601 bps, 152.2741 Gbps
179
+ busbw: 114.2056 Gbps
180
+ ignore me 1
181
+ 3:
182
+ duration: 0.4225 sec
183
+ algo throughput: 151490688123.0876 bps, 151.4907 Gbps
184
+ busbw: 113.6180 Gbps
185
+ ignore me 7
186
+ ignore me 7
187
+ ignore me 7
188
+ 3:
189
+ duration: 0.0479 sec
190
+ algo throughput: 1336658447010.4644 bps, 1336.6584 Gbps
191
+ busbw: 1002.4938 Gbps
192
+ ignore me 7
193
+ 1:
194
+ duration: 0.0483 sec
195
+ algo throughput: 1325019685494.1951 bps, 1325.0197 Gbps
196
+ busbw: 993.7648 Gbps
197
+ 0:
198
+ duration: 0.0483 sec
199
+ algo throughput: 1323924013812.1467 bps, 1323.9240 Gbps
200
+ busbw: 992.9430 Gbps
201
+ 2:
202
+ duration: 0.0483 sec
203
+ algo throughput: 1324507343140.4290 bps, 1324.5073 Gbps
204
+ busbw: 993.3805 Gbps
205
+ ignore me 31
206
+ ignore me 31
207
+ ignore me 31
208
+ ignore me 31
209
+ 3:
210
+ duration: 0.0479 sec
211
+ algo throughput: 1335850436641.9412 bps, 1335.8504 Gbps
212
+ busbw: 1001.8878 Gbps
213
+ 2:
214
+ duration: 0.0478 sec
215
+ algo throughput: 1338717258044.6157 bps, 1338.7173 Gbps
216
+ busbw: 1004.0379 Gbps
217
+ 0:
218
+ duration: 0.0479 sec
219
+ algo throughput: 1336480609710.5195 bps, 1336.4806 Gbps
220
+ busbw: 1002.3605 Gbps
221
+ 1:
222
+ duration: 0.0479 sec
223
+ algo throughput: 1335644997705.6060 bps, 1335.6450 Gbps
224
+ busbw: 1001.7337 Gbps
225
+ ignore me 124
226
+ ignore me 124
227
+ ignore me 124
228
+ 2:
229
+ duration: 0.0479 sec
230
+ algo throughput: 1337297229056.0354 bps, 1337.2972 Gbps
231
+ busbw: 1002.9729 Gbps
232
+ 0:
233
+ duration: 0.0479 sec
234
+ algo throughput: 1337048861958.8491 bps, 1337.0489 Gbps
235
+ busbw: 1002.7866 Gbps
236
+ ignore me 124
237
+ 1:
238
+ duration: 0.0479 sec
239
+ algo throughput: 1337386146372.2676 bps, 1337.3861 Gbps
240
+ busbw: 1003.0396 Gbps
241
+ 3:
242
+ duration: 0.0480 sec
243
+ algo throughput: 1333613993474.4404 bps, 1333.6140 Gbps
244
+ busbw: 1000.2105 Gbps
245
+ ignore me 496
246
+ ignore me 496
247
+ ignore me 496
248
+ ignore me 496
249
+ 2:
250
+ duration: 0.0481 sec
251
+ algo throughput: 1329998661494.7930 bps, 1329.9987 Gbps
252
+ busbw: 997.4990 Gbps
253
+ 3:
254
+ duration: 0.0480 sec
255
+ algo throughput: 1333082662016.4126 bps, 1333.0827 Gbps
256
+ busbw: 999.8120 Gbps
257
+ 1:
258
+ duration: 0.0481 sec
259
+ algo throughput: 1330394518818.0288 bps, 1330.3945 Gbps
260
+ busbw: 997.7959 Gbps
261
+ 0:
262
+ duration: 0.0481 sec
263
+ algo throughput: 1329424219916.1433 bps, 1329.4242 Gbps
264
+ busbw: 997.0682 Gbps
experiments/bandwidth/n1_32gb_all_reduce_bench.txt ADDED
@@ -0,0 +1,264 @@
1
+ export NCCL_DEBUG=info
2
+ python -m torch.distributed.launch --nproc_per_node=4 all_reduce_bench.py
3
+
4
+ *****************************************
5
+ Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
6
+ *****************************************
7
+ local_rank: 3
8
+ local_rank: 1
9
+ local_rank: 0
10
+ local_rank: 2
11
+ 0 data size: 4.0 GB
12
+ 2 data size: 4.0 GB
13
+ 3 data size: 4.0 GB
14
+ 1 data size: 4.0 GB
15
+ r7i4n1:63120:63120 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0>
16
+ r7i4n1:63120:63120 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
17
+ r7i4n1:63120:63120 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0>
18
+ r7i4n1:63120:63120 [0] NCCL INFO Using network IB
19
+ NCCL version 2.7.8+cuda10.2
20
+ r7i4n1:63123:63123 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0>
21
+ r7i4n1:63121:63121 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0>
22
+ r7i4n1:63123:63123 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
23
+ r7i4n1:63121:63121 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
24
+ r7i4n1:63121:63121 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0>
25
+ r7i4n1:63123:63123 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0>
26
+ r7i4n1:63121:63121 [1] NCCL INFO Using network IB
27
+ r7i4n1:63123:63123 [3] NCCL INFO Using network IB
28
+ r7i4n1:63122:63122 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0>
29
+ r7i4n1:63122:63122 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
30
+ r7i4n1:63122:63122 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0>
31
+ r7i4n1:63122:63122 [2] NCCL INFO Using network IB
32
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 00/12 : 0 1 2 3
33
+ r7i4n1:63122:63194 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
34
+ r7i4n1:63121:63193 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
35
+ r7i4n1:63123:63192 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
36
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 01/12 : 0 1 3 2
37
+ r7i4n1:63122:63194 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1
38
+ r7i4n1:63121:63193 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1
39
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 02/12 : 0 2 3 1
40
+ r7i4n1:63123:63192 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1
41
+ r7i4n1:63122:63194 [2] NCCL INFO Setting affinity for GPU 2 to ffff,f00000ff,fff00000
42
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 03/12 : 0 2 1 3
43
+ r7i4n1:63121:63193 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff00,000fffff
44
+ r7i4n1:63123:63192 [3] NCCL INFO Setting affinity for GPU 3 to ffff,f00000ff,fff00000
45
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 04/12 : 0 3 1 2
46
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC
47
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 05/12 : 0 3 2 1
48
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 06/12 : 0 1 2 3
49
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 07/12 : 0 1 3 2
50
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 08/12 : 0 2 3 1
51
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 09/12 : 0 2 1 3
52
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 10/12 : 0 3 1 2
53
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 11/12 : 0 3 2 1
54
+ r7i4n1:63120:63191 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64
55
+ r7i4n1:63120:63191 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1
56
+ r7i4n1:63120:63191 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff
57
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC
58
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC
59
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC
60
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC
61
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC
62
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC
63
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC
64
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC
65
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC
66
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC
67
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC
68
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC
69
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC
70
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC
71
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC
72
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC
73
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC
74
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC
75
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC
76
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC
77
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC
78
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC
79
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC
80
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC
81
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC
82
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC
83
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC
84
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC
85
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC
86
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC
87
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC
88
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC
89
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC
90
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC
91
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC
92
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC
93
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC
94
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC
95
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC
96
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC
97
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC
98
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC
99
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC
100
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC
101
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC
102
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC
103
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC
104
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC
105
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC
106
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC
107
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC
108
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC
109
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC
110
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC
111
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC
112
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC
113
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC
114
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC
115
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC
116
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC
117
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC
118
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC
119
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC
120
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC
121
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC
122
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC
123
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC
124
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC
125
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC
126
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC
127
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC
128
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC
129
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC
130
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC
131
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC
132
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC
133
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC
134
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC
135
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC
136
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC
137
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC
138
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC
139
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC
140
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC
141
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC
142
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC
143
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC
144
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC
145
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC
146
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC
147
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC
148
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC
149
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC
150
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC
151
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC
152
+ r7i4n1:63122:63194 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC
153
+ r7i4n1:63121:63193 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC
154
+ r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC
155
+ r7i4n1:63120:63191 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC
156
+ r7i4n1:63121:63193 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
157
+ r7i4n1:63121:63193 [1] NCCL INFO comm 0x148f80001060 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE
158
+ r7i4n1:63122:63194 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
159
+ r7i4n1:63122:63194 [2] NCCL INFO comm 0x152f00001060 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE
160
+ r7i4n1:63123:63192 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
161
+ r7i4n1:63120:63191 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer
162
+ r7i4n1:63123:63192 [3] NCCL INFO comm 0x146050001060 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE
163
+ r7i4n1:63120:63191 [0] NCCL INFO comm 0x14f24c001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE
164
+ r7i4n1:63120:63120 [0] NCCL INFO Launch mode Parallel
165
+ ignore me 2
166
+ ignore me 2
167
+ ignore me 2
168
+ 3:
169
+ duration: 0.6125 sec
170
+ algo throughput: 104487664227.6784 bps, 104.4877 Gbps
171
+ busbw: 78.3657 Gbps
172
+ 0:
173
+ duration: 0.5584 sec
174
+ algo throughput: 114613183387.2373 bps, 114.6132 Gbps
175
+ busbw: 85.9599 Gbps
176
+ 2:
177
+ duration: 0.5140 sec
178
+ algo throughput: 124513941981.7996 bps, 124.5139 Gbps
179
+ busbw: 93.3855 Gbps
180
+ ignore me 2
181
+ 1:
182
+ duration: 0.6245 sec
183
+ algo throughput: 102486528362.0469 bps, 102.4865 Gbps
184
+ busbw: 76.8649 Gbps
185
+ ignore me 11
186
+ ignore me 11
187
+ ignore me 11
188
+ ignore me 11
189
+ 1:
190
+ duration: 0.0479 sec
191
+ algo throughput: 1337346013047.7080 bps, 1337.3460 Gbps
192
+ busbw: 1003.0095 Gbps
193
+ 2:
194
+ duration: 0.0482 sec
195
+ algo throughput: 1328071705904.8621 bps, 1328.0717 Gbps
196
+ busbw: 996.0538 Gbps
197
+ 3:
198
+ duration: 0.0483 sec
199
+ algo throughput: 1325052362787.1750 bps, 1325.0524 Gbps
200
+ busbw: 993.7893 Gbps
201
+ 0:
202
+ duration: 0.0483 sec
203
+ algo throughput: 1325619195876.0120 bps, 1325.6192 Gbps
204
+ busbw: 994.2144 Gbps
205
+ ignore me 45
206
+ ignore me 45
207
+ ignore me 45
208
+ ignore me 45
209
+ 1:
210
+ duration: 0.0485 sec
211
+ algo throughput: 1319242278750.3755 bps, 1319.2423 Gbps
212
+ busbw: 989.4317 Gbps
213
+ 3:
214
+ duration: 0.0485 sec
215
+ algo throughput: 1320339103321.9136 bps, 1320.3391 Gbps
216
+ busbw: 990.2543 Gbps
217
+ 2:
218
+ duration: 0.0485 sec
219
+ algo throughput: 1318722904549.9961 bps, 1318.7229 Gbps
220
+ busbw: 989.0422 Gbps
221
+ 0:
222
+ duration: 0.0485 sec
223
+ algo throughput: 1320313583319.3479 bps, 1320.3136 Gbps
224
+ busbw: 990.2352 Gbps
225
+ ignore me 183
226
+ ignore me 183
227
+ ignore me 183
228
+ ignore me 183
229
+ 2:
230
+ duration: 0.0484 sec
231
+ algo throughput: 1322236494553.5015 bps, 1322.2365 Gbps
232
+ busbw: 991.6774 Gbps
233
+ 0:
234
+ duration: 0.0484 sec
235
+ algo throughput: 1321797181142.1807 bps, 1321.7972 Gbps
236
+ busbw: 991.3479 Gbps
237
+ 1:
238
+ duration: 0.0485 sec
239
+ algo throughput: 1318282723325.4265 bps, 1318.2827 Gbps
240
+ busbw: 988.7120 Gbps
241
+ 3:
242
+ duration: 0.0485 sec
243
+ algo throughput: 1320550708735.8535 bps, 1320.5507 Gbps
244
+ busbw: 990.4130 Gbps
245
+ ignore me 733
246
+ ignore me 733
247
+ ignore me 733
248
+ 1:
249
+ duration: 0.0483 sec
250
+ algo throughput: 1323715979433.6658 bps, 1323.7160 Gbps
251
+ busbw: 992.7870 Gbps
252
+ 2:
253
+ duration: 0.0484 sec
254
+ algo throughput: 1322345035832.8503 bps, 1322.3450 Gbps
255
+ busbw: 991.7588 Gbps
256
+ ignore me 733
257
+ 3:
258
+ duration: 0.0484 sec
259
+ algo throughput: 1323624408929.4016 bps, 1323.6244 Gbps
260
+ busbw: 992.7183 Gbps
261
+ 0:
262
+ duration: 0.0485 sec
263
+ algo throughput: 1319272113636.8833 bps, 1319.2721 Gbps
264
+ busbw: 989.4541 Gbps
experiments/gpt2-16gb-nodes.md ADDED
@@ -0,0 +1,1591 @@
1
+ # GPT2 Experiments
2
+
3
+ Scripts and logs of GPT2 experiments on Jean Zay HPC.
4
+
5
+ Using 4x V100 16GB nodes.
6
+
7
+ For now we can't really allocate many 32GB nodes, so no serious evaluation can be done there.
+ (Add `-C v100-32g` to request 32GB nodes.)
9
+
10
+ ## Megatron-LM
11
+
12
+ Constants:
13
+
14
+ - `TP_SIZE` = tensor parallel
15
+ - `PP_SIZE` = pipeline parallel
16
+ - `DP_SIZE` = data parallel is derived automatically from `WORLD_SIZE / (TP_SIZE * PP_SIZE)`
17
+ - `WORLD_SIZE` = total number of GPUs
18
+
19
+ According to the Megatron-LM paper the highest degree of TP we can use is 4 on these 4-GPU nodes - crossing node boundaries would slow things down a lot. So max `TP_SIZE=4`, i.e. the full 4-GPU node is used only for the tensor-parallel dimension.
20
+
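+ For illustration, a minimal sketch (plain Python, numbers taken from the 4-node experiment below; the names simply mirror the script variables) of how the derived sizes relate:
+
+ ```
+ WORLD_SIZE = 16                                              # 4 nodes x 4 GPUs
+ TP_SIZE, PP_SIZE = 4, 4
+ DP_SIZE = WORLD_SIZE // (TP_SIZE * PP_SIZE)                  # -> 1
+
+ MICRO_BATCH_SIZE, PP_CHUNKS = 1, 4
+ GLOBAL_BATCH_SIZE = MICRO_BATCH_SIZE * PP_CHUNKS * DP_SIZE   # -> 4
+ ```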
21
+
22
+ ### Summary
23
+
24
+ This section summarizes the numbers from the experiment sections below:
25
+
26
+ **Megatron**:
27
+
28
+ Not yet optimized with NVIDIA team!
29
+
30
+ | GPUs | Size | Micro-BS | PP Chunks | DP | PP | Throughput |
31
+ | ---: | ---: | -------: | --------: | --: | -: | ---------: |
32
+ | 16 | 7.5B | 1 | 4 | 1 | 4 | 661ms |
33
+ | 64 | 30B | 1 | 4 | 1 | 16 | 1439ms |
34
+ | 128 | 50B | 1 | 4 | 1 | 32 | 2124ms |
35
+ | 256 | 78B | 1 | 4 | 1 | 64 | 2953ms |
36
+ | 256 | 22B | 1 | 4 | 4 | 16 | 1826ms |
37
+ | | | | | | | |
38
+
39
+
40
+ - `TP=4` in all entries
+ - Throughput is the time per iteration, i.e. the time to complete one global batch
+ - Global batch size is `micro-batch-size * pp_chunks * dp_size`
+ - PP chunks is the number of micro-batches each pipeline processes per iteration, so each pipeline handles `micro-batch-size * pp_chunks` samples
44
+
45
+ **Megatron + Deepspeed ZeRO**:
46
+
47
+ Not yet optimized with Deepspeed team!
48
+
49
+ | GPUs | Size | Micro-BS | PP Chunks | DP | PP | Throughput |
50
+ | ---: | ---: | -------: | --------: | --: | -: | ---------: |
51
+ | 64 | 30B | 1 | 4 | 1 | 16 | 28716ms |
52
+ | | | | | | | |
53
+
54
+
55
+
56
+
57
+ ### Nodes=4 DP=1 TP=4 PP=4
58
+
59
+ Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources:
60
+
61
+ ```
62
+ salloc --account=six@gpu --nodes=4 --ntasks=4 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
63
+ ```
64
+
65
+ The biggest model we can fit with `micro-batch-size=1`: **7.5B**
66
+
67
+ ```
68
+
69
+ cd $six_ALL_CCFRWORK/code/megatron-lm/
70
+
71
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
72
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
73
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
74
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
75
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node
76
+
77
+ GPUS_PER_NODE=4
78
+ NNODES=4
79
+
80
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
81
+ MASTER_PORT=6000
82
+ NODE_RANK=0
83
+
84
+ NHEADS=32
85
+ NHIDDEN=4096
86
+ NLAYERS=36
87
+ SEQ_LEN=1024
88
+ VOCAB_SIZE=50257
89
+
90
+ MICRO_BATCH_SIZE=1
91
+ PP_CHUNKS=4
92
+
93
+ PP_SIZE=4
94
+ DP_SIZE=1
95
+ TP_SIZE=4
96
+
97
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
98
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
99
+
100
+ GPT_ARGS=" \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --global-batch-size $GLOBAL_BATCH_SIZE
108
+ --lr 0.00015 \
109
+ --lr-decay-style cosine \
110
+ --min-lr 1.0e-5 \
111
+ --train-iters 1000 \
112
+ --lr-decay-iters 800 \
113
+ --lr-warmup-fraction .01 \
114
+ --weight-decay 1e-2 \
115
+ --clip-grad 1.0 \
116
+ --vocab-file $VOCAB_FILE \
117
+ --merge-file $MERGE_FILE \
118
+ --fp16 \
119
+ --checkpoint-activations \
120
+ "
121
+
122
+ OUTPUT_ARGS=" \
123
+ --log-interval 10 \
124
+ --save-interval 500 \
125
+ --eval-interval 100 \
126
+ --eval-iters 10 \
127
+ "
128
+
129
+ export LAUNCHER="python -u -m torch.distributed.launch \
130
+ --nproc_per_node $GPUS_PER_NODE \
131
+ --nnodes $NNODES \
132
+ --master_addr $MASTER_ADDR \
133
+ --master_port $MASTER_PORT \
134
+ "
135
+
136
+ export CMD=" \
137
+ `pwd`/pretrain_gpt.py \
138
+ --tensor-model-parallel-size $TP_SIZE \
139
+ --pipeline-model-parallel-size $PP_SIZE \
140
+ $GPT_ARGS \
141
+ $OUTPUT_ARGS \
142
+ --save $SAVE_CHECKPOINT_PATH \
143
+ --load $SAVE_CHECKPOINT_PATH \
144
+ --data-path $DATA_PATH \
145
+ --data-impl mmap \
146
+ --split 949,50,1 \
147
+ --distributed-backend nccl \
148
+ "
149
+
150
+ # clear old checkpoint as it'd mismatch while we sort things out
151
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node
152
+
153
+ # model size
154
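+ # i.e. params ~= l*(12*h^2 + 13*h) + v*h + s*h, printed in billions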
+ python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
155
+
156
+ # to debug - add echo (it exits and prints what it would have launched)
157
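+ # srun starts one task per node; each task launches $GPUS_PER_NODE local processes, passing its node rank via SLURM_PROCID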
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
158
+
159
+
160
+ ```
161
+
162
+ Stats:
163
+
164
+ ```
165
+ iteration 50/ 1000 | consumed samples: 200 | elapsed time per iteration (ms): 661.3 | learning rate:
166
+ 1.497E-04 | global batch size: 4 | lm loss: 8.238016E+00 | loss scale: 16384.0 | grad norm: 2.555 |
167
+ number of skipped iterations: 0 | number of nan iterations: 0 | time (ms) | forward-compute: 92.25 |
168
+ forward-recv: 65.68 | backward-compute: 239.82 | backward-send: 0.54 | backward-send-forward-recv:
169
+ 4.29 | backward-params-all-reduce: 10.50 | backward-embedding-all-reduce: 204.76 |
170
+ optimizer-copy-to-main-grad: 4.47 | optimizer-unscale-and-check-inf: 5.68 |
171
+ optimizer-clip-main-grad: 8.56 | optimizer-copy-main-to-model-params: 4.41 | optimizer: 42.31 |
172
+ batch-generator: 2.70
173
+ ```
174
+
175
+
176
+ ### Nodes=16 DP=1 TP=4 PP=16
177
+
178
+
179
+ Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources:
180
+
181
+ ```
182
+ salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
183
+ ```
184
+
185
+ The biggest model we can fit with `micro-batch-size=1`: barely **30B**
186
+
187
+ (30B is not in the paper's table - we took the 39B model and reduced NHIDDEN to 7168 to overcome OOM, but it still OOM'ed after 60 steps, so it was still a bit too much.)
188
+
189
+ ```
190
+
191
+ cd $six_ALL_CCFRWORK/code/megatron-lm/
192
+
193
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
194
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
195
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
196
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
197
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node
198
+
199
+ GPUS_PER_NODE=4
200
+ NNODES=16
201
+
202
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
203
+ MASTER_PORT=6000
204
+ NODE_RANK=0
205
+
206
+ NHEADS=32
207
+ NHIDDEN=7168
208
+ NLAYERS=48
209
+ SEQ_LEN=1024
210
+
211
+ MICRO_BATCH_SIZE=1
212
+ PP_CHUNKS=4
213
+
214
+ PP_SIZE=16
215
+ DP_SIZE=1
216
+ TP_SIZE=4
217
+
218
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
219
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
220
+
221
+ GPT_ARGS=" \
222
+ --num-layers $NLAYERS \
223
+ --hidden-size $NHIDDEN \
224
+ --num-attention-heads $NHEADS \
225
+ --seq-length $SEQ_LEN \
226
+ --max-position-embeddings $SEQ_LEN \
227
+ --micro-batch-size $MICRO_BATCH_SIZE \
228
+ --global-batch-size $GLOBAL_BATCH_SIZE
229
+ --lr 0.00015 \
230
+ --lr-decay-style cosine \
231
+ --min-lr 1.0e-5 \
232
+ --train-iters 1000 \
233
+ --lr-decay-iters 800 \
234
+ --lr-warmup-fraction .01 \
235
+ --weight-decay 1e-2 \
236
+ --clip-grad 1.0 \
237
+ --vocab-file $VOCAB_FILE \
238
+ --merge-file $MERGE_FILE \
239
+ --fp16 \
240
+ --checkpoint-activations \
241
+ "
242
+
243
+ OUTPUT_ARGS=" \
244
+ --log-interval 10 \
245
+ --save-interval 500 \
246
+ --eval-interval 100 \
247
+ --eval-iters 10 \
248
+ "
249
+
250
+ export LAUNCHER="python -u -m torch.distributed.launch \
251
+ --nproc_per_node $GPUS_PER_NODE \
252
+ --nnodes $NNODES \
253
+ --master_addr $MASTER_ADDR \
254
+ --master_port $MASTER_PORT \
255
+ "
256
+
257
+ export CMD=" \
258
+ `pwd`/pretrain_gpt.py \
259
+ --tensor-model-parallel-size $TP_SIZE \
260
+ --pipeline-model-parallel-size $PP_SIZE \
261
+ $GPT_ARGS \
262
+ $OUTPUT_ARGS \
263
+ --save $SAVE_CHECKPOINT_PATH \
264
+ --load $SAVE_CHECKPOINT_PATH \
265
+ --data-path $DATA_PATH \
266
+ --data-impl mmap \
267
+ --split 949,50,1 \
268
+ --distributed-backend nccl \
269
+ "
270
+
271
+ # clear old checkpoint as it'd mismatch while we sort things out
272
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node
273
+
274
+ # to debug - add echo (it exits and prints what it would have launched)
275
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
276
+
277
+
278
+ ```
279
+
280
+ Stats:
281
+
282
+
283
+
284
+ ```
285
+ iteration 30/ 1000 | consumed samples: 120 | elapsed time per iteration (ms): 1439.3 | learning
286
+ rate: 1.500E-04 | global batch size: 4 | lm loss: 2.667133E+01 | loss scale: 16384.0 | grad norm:
287
+ 73.338 | number of skipped iterations: 1 | number of nan iterations: 0 | time (ms) |
288
+ forward-compute: 77.94 | forward-recv: 285.81 | backward-compute: 203.21 | backward-send: 0.91 |
289
+ backward-send-forward-recv: 5.44 | backward-params-all-reduce: 10.38 |
290
+ backward-embedding-all-reduce: 811.34 | optimizer-copy-to-main-grad: 4.61 |
291
+ optimizer-unscale-and-check-inf: 7.90 | optimizer-clip-main-grad: 7.91 |
292
+ optimizer-copy-main-to-model-params: 3.95 | optimizer: 43.19 | batch-generator: 2.64
293
+ ```
294
+
295
+ ### Nodes=32 DP=1 TP=4 PP=32
296
+
297
+ Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources:
298
+
299
+ ```
300
+ salloc --account=six@gpu --nodes=32 --ntasks=32 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
301
+ ```
302
+
303
+ The biggest model we can fit with `micro-batch-size=1`: **50B**
304
+
305
+ (50B is not in the paper's table - we took the 76B model, had to change to NLAYERS=64 for it to work, and reduced NHIDDEN to 8192 to overcome OOM, but it still OOM'ed after 60 steps, so it was still a bit too much.)
306
+
307
+ ```
308
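+ # sums the parameter counts and shifts right by 20 bits, i.e. reports the total in 2^20 (~1M) units: 50023 ~= 50B params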
+ perl -le 'print( (120*402780160+8*514977792)>>20)'
309
+ 50023
310
+ ```
311
+
312
+ ```
313
+
314
+ cd $six_ALL_CCFRWORK/code/megatron-lm/
315
+
316
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
317
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
318
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
319
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
320
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node
321
+
322
+ GPUS_PER_NODE=4
323
+ NNODES=32
324
+
325
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
326
+ MASTER_PORT=6000
327
+ NODE_RANK=0
328
+
329
+ NHEADS=32
330
+ NHIDDEN=8192
331
+ NLAYERS=64
332
+ SEQ_LEN=1024
333
+
334
+ MICRO_BATCH_SIZE=1
335
+ PP_CHUNKS=4
336
+
337
+ PP_SIZE=32
338
+ DP_SIZE=1
339
+ TP_SIZE=4
340
+
341
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
342
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
343
+
344
+ GPT_ARGS=" \
345
+ --num-layers $NLAYERS \
346
+ --hidden-size $NHIDDEN \
347
+ --num-attention-heads $NHEADS \
348
+ --seq-length $SEQ_LEN \
349
+ --max-position-embeddings $SEQ_LEN \
350
+ --micro-batch-size $MICRO_BATCH_SIZE \
351
+ --global-batch-size $GLOBAL_BATCH_SIZE
352
+ --lr 0.00015 \
353
+ --lr-decay-style cosine \
354
+ --min-lr 1.0e-5 \
355
+ --train-iters 1000 \
356
+ --lr-decay-iters 800 \
357
+ --lr-warmup-fraction .01 \
358
+ --weight-decay 1e-2 \
359
+ --clip-grad 1.0 \
360
+ --vocab-file $VOCAB_FILE \
361
+ --merge-file $MERGE_FILE \
362
+ --fp16 \
363
+ --checkpoint-activations \
364
+ "
365
+
366
+ OUTPUT_ARGS=" \
367
+ --log-interval 10 \
368
+ --save-interval 500 \
369
+ --eval-interval 100 \
370
+ --eval-iters 10 \
371
+ "
372
+
373
+ export LAUNCHER="python -u -m torch.distributed.launch \
374
+ --nproc_per_node $GPUS_PER_NODE \
375
+ --nnodes $NNODES \
376
+ --master_addr $MASTER_ADDR \
377
+ --master_port $MASTER_PORT \
378
+ "
379
+
380
+ export CMD=" \
381
+ `pwd`/pretrain_gpt.py \
382
+ --tensor-model-parallel-size $TP_SIZE \
383
+ --pipeline-model-parallel-size $PP_SIZE \
384
+ $GPT_ARGS \
385
+ $OUTPUT_ARGS \
386
+ --save $SAVE_CHECKPOINT_PATH \
387
+ --load $SAVE_CHECKPOINT_PATH \
388
+ --data-path $DATA_PATH \
389
+ --data-impl mmap \
390
+ --split 949,50,1 \
391
+ --distributed-backend nccl \
392
+ "
393
+
394
+ # clear old checkpoint as it'd mismatch while we sort things out
395
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node
396
+
397
+ # to debug - add echo (it exits and prints what it would have launched)
398
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
399
+
400
+
401
+ ```
402
+
403
+ Stats:
404
+
405
+ ```
406
+ iteration 50/ 1000 | consumed samples: 200 | elapsed time per iteration (ms): 2124.0 | learning
407
+ rate: 1.497E-04 | global batch size: 4 | lm loss: 1.038553E+01 | loss scale: 16384.0 | grad norm:
408
+ 14.954 | number of skipped iterations: 0 | number of nan iterations: 0 | time (ms) |
409
+ forward-compute: 68.08 | forward-recv: 485.51 | backward-compute: 175.50 | backward-send: 0.85 |
410
+ backward-send-forward-recv: 5.63 | backward-params-all-reduce: 9.54 | backward-embedding-all-reduce:
411
+ 1321.49 | optimizer-copy-to-main-grad: 4.19 | optimizer-unscale-and-check-inf: 21.21 |
412
+ optimizer-clip-main-grad: 8.04 | optimizer-copy-main-to-model-params: 3.98 | optimizer: 56.47 |
413
+ batch-generator: 2.72
414
+
415
+ ```
416
+
417
+
418
+
419
+
420
+
421
+ ### Nodes=64 DP=1 TP=4 PP=64
422
+
423
+ Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources:
424
+
425
+ ```
426
+ salloc --account=six@gpu --nodes=64 --ntasks=64 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
427
+ ```
428
+
429
+ The biggest model we can fit with `micro-batch-size=1`: **78B**
430
+
431
+ (78B is not in the paper's table - we took the 76B model and had to change to NLAYERS=64 for it to work.)
432
+
433
+ ```
434
+ perl -le 'print( (248*314652160+8*454899200)>>20)'
435
+ 77889
436
+ ```
437
+
438
+ ```
439
+
440
+ cd $six_ALL_CCFRWORK/code/megatron-lm/
441
+
442
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
443
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
444
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
445
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
446
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node
447
+
448
+ GPUS_PER_NODE=4
449
+ NNODES=64
450
+
451
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
452
+ MASTER_PORT=6000
453
+ NODE_RANK=0
454
+
455
+ NHEADS=32
456
+ NHIDDEN=10240
457
+ NLAYERS=64
458
+ SEQ_LEN=1024
459
+
460
+ MICRO_BATCH_SIZE=1
461
+ PP_CHUNKS=4
462
+
463
+ PP_SIZE=64
464
+ DP_SIZE=1
465
+ TP_SIZE=4
466
+
467
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
468
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
469
+
470
+ GPT_ARGS=" \
471
+ --num-layers $NLAYERS \
472
+ --hidden-size $NHIDDEN \
473
+ --num-attention-heads $NHEADS \
474
+ --seq-length $SEQ_LEN \
475
+ --max-position-embeddings $SEQ_LEN \
476
+ --micro-batch-size $MICRO_BATCH_SIZE \
477
+ --global-batch-size $GLOBAL_BATCH_SIZE
478
+ --lr 0.00015 \
479
+ --lr-decay-style cosine \
480
+ --min-lr 1.0e-5 \
481
+ --train-iters 1000 \
482
+ --lr-decay-iters 800 \
483
+ --lr-warmup-fraction .01 \
484
+ --weight-decay 1e-2 \
485
+ --clip-grad 1.0 \
486
+ --vocab-file $VOCAB_FILE \
487
+ --merge-file $MERGE_FILE \
488
+ --fp16 \
489
+ --checkpoint-activations \
490
+ "
491
+
492
+ OUTPUT_ARGS=" \
493
+ --log-interval 10 \
494
+ --save-interval 500 \
495
+ --eval-interval 100 \
496
+ --eval-iters 10 \
497
+ "
498
+
499
+ export LAUNCHER="python -u -m torch.distributed.launch \
500
+ --nproc_per_node $GPUS_PER_NODE \
501
+ --nnodes $NNODES \
502
+ --master_addr $MASTER_ADDR \
503
+ --master_port $MASTER_PORT \
504
+ "
505
+
506
+ export CMD=" \
507
+ `pwd`/pretrain_gpt.py \
508
+ --tensor-model-parallel-size $TP_SIZE \
509
+ --pipeline-model-parallel-size $PP_SIZE \
510
+ $GPT_ARGS \
511
+ $OUTPUT_ARGS \
512
+ --save $SAVE_CHECKPOINT_PATH \
513
+ --load $SAVE_CHECKPOINT_PATH \
514
+ --data-path $DATA_PATH \
515
+ --data-impl mmap \
516
+ --split 949,50,1 \
517
+ --distributed-backend nccl \
518
+ "
519
+
520
+ # clear old checkpoint as it'd mismatch while we sort things out
521
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node
522
+
523
+ # to debug - add echo (it exits and prints what it would have launched)
524
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
525
+
526
+ ```
527
+
528
+ Stats:
529
+
530
+ ```
531
+ iteration 30/ 1000 | consumed samples: 120 | elapsed time per iteration (ms): 2953.3 | learning
532
+ rate: 1.500E-04 | global batch size: 4 | lm loss: 3.785040E+01 | loss scale: 16384.0 | grad norm:
533
+ 47.681 | number of skipped iterations: 1 | number of nan iterations: 0 | time (ms) |
534
+ forward-compute: 53.67 | forward-recv: 746.59 | backward-compute: 134.74 | backward-send: 1.01 |
535
+ backward-send-forward-recv: 6.49 | backward-params-all-reduce: 8.29 | backward-embedding-all-reduce:
536
+ 1964.85 | optimizer-copy-to-main-grad: 3.64 | optimizer-unscale-and-check-inf: 8.68 |
537
+ optimizer-clip-main-grad: 6.34 | optimizer-copy-main-to-model-params: 3.10 | optimizer: 36.80 |
538
+ batch-generator: 2.52
539
+ ```
540
+
541
+
542
+
543
+ ### Nodes=64 DP=4 TP=4 PP=16
544
+
545
+ Let's try a smaller model with a larger batch size.
546
+
547
+ Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources:
548
+
549
+ ```
550
+ salloc --account=six@gpu --nodes=64 --ntasks=64 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
551
+ ```
552
+
553
+ The biggest model we can fit with `micro-batch-size=1` and DP=4: **22B**
554
+
555
+ ```
556
+ perl -le 'print( (48*402780160+8*514977792)>>20)'
557
+ 22366
558
+ ```
559
+
560
+ ```
561
+
562
+ cd $six_ALL_CCFRWORK/code/megatron-lm/
563
+
564
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
565
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
566
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
567
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
568
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node
569
+
570
+ GPUS_PER_NODE=4
571
+ NNODES=64
572
+
573
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
574
+ MASTER_PORT=6000
575
+ NODE_RANK=0
576
+
577
+ NHEADS=32
578
+ NHIDDEN=8192
579
+ NLAYERS=32
580
+ SEQ_LEN=1024
581
+
582
+ MICRO_BATCH_SIZE=1
583
+ PP_CHUNKS=4
584
+ GAS=$PP_CHUNKS
585
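+ # GAS (presumably gradient accumulation steps) is set to the number of pipeline chunks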
+
586
+ PP_SIZE=16
587
+ DP_SIZE=4
588
+ TP_SIZE=4
589
+
590
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
591
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
592
+
593
+ GPT_ARGS=" \
594
+ --num-layers $NLAYERS \
595
+ --hidden-size $NHIDDEN \
596
+ --num-attention-heads $NHEADS \
597
+ --seq-length $SEQ_LEN \
598
+ --max-position-embeddings $SEQ_LEN \
599
+ --micro-batch-size $MICRO_BATCH_SIZE \
600
+ --global-batch-size $GLOBAL_BATCH_SIZE \
601
+ --gas $GAS \
602
+ --lr 0.00015 \
603
+ --lr-decay-style cosine \
604
+ --min-lr 1.0e-5 \
605
+ --train-iters 1000 \
606
+ --lr-decay-iters 800 \
607
+ --lr-warmup-fraction .01 \
608
+ --weight-decay 1e-2 \
609
+ --clip-grad 1.0 \
610
+ --vocab-file $VOCAB_FILE \
611
+ --merge-file $MERGE_FILE \
612
+ --fp16 \
613
+ --checkpoint-activations \
614
+ "
615
+
616
+ OUTPUT_ARGS=" \
617
+ --log-interval 10 \
618
+ --save-interval 500 \
619
+ --eval-interval 100 \
620
+ --eval-iters 10 \
621
+ "
622
+
623
+ export LAUNCHER="python -u -m torch.distributed.launch \
624
+ --nproc_per_node $GPUS_PER_NODE \
625
+ --nnodes $NNODES \
626
+ --master_addr $MASTER_ADDR \
627
+ --master_port $MASTER_PORT \
628
+ "
629
+
630
+ export CMD=" \
631
+ `pwd`/pretrain_gpt.py \
632
+ --tensor-model-parallel-size $TP_SIZE \
633
+ --pipeline-model-parallel-size $PP_SIZE \
634
+ $GPT_ARGS \
635
+ $OUTPUT_ARGS \
636
+ --save $SAVE_CHECKPOINT_PATH \
637
+ --load $SAVE_CHECKPOINT_PATH \
638
+ --data-path $DATA_PATH \
639
+ --data-impl mmap \
640
+ --split 949,50,1 \
641
+ --distributed-backend nccl \
642
+ "
643
+
644
+ # clear old checkpoint as it'd mismatch while we sort things out
645
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node
646
+
647
+ # to debug - add echo (it exits and prints what it would have launched)
648
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
649
+
650
+
651
+ ```
652
+
653
+ Stats:
654
+
655
+ ```
656
+ iteration 40/ 1000 | consumed samples: 640 | elapsed time per iteration (ms): 1826.3 | learning
657
+ rate: 1.499E-04 | global batch size: 16 | lm loss: 1.290925E+01 | loss scale: 16384.0 | grad norm:
658
+ 7.607 | number of skipped iterations: 0 | number of nan iterations: 0 |
659
+ time (ms) | forward-compute: 80.84 | forward-recv: 225.57 | backward-compute: 172.26 |
660
+ backward-send: 0.86 | backward-send-forward-recv: 5.76 | backward-params-all-reduce: 307.62 |
661
+ backward-embedding-all-reduce: 746.14 | optimizer-copy-to-main-grad: 4.20 |
662
+ optimizer-unscale-and-check-inf: 250.90 | optimizer-clip-main-grad: 8.06 |
663
+ optimizer-copy-main-to-model-params: 3.99 | optimizer: 286.27 | batch-generator: 2.72
664
+
665
+
666
+ ```
667
+
668
+
669
+
670
+
671
+ ## Megatron + Deepspeed ZeRO
672
+
673
+ **Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3` is not in sync with Megatron-LM master, so several config args don't match.
674
+
675
+ Status: Unoptimized
676
+
677
+ ### Nodes=16
678
+
679
+
680
+ ```
681
+ salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
682
+ ```
683
+
684
+ Todo: a 46B experiment with:
+
+ ```
+ NHEADS=32
+ NHIDDEN=9216
+ NLAYERS=48
+ SEQ_LEN=1024
+ VOCAB_SIZE=50257
+ ```
692
+
693
+
694
+ ```
695
+
696
+ cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3
697
+
698
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
699
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
700
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
701
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
702
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-meg-ds
703
+
704
+ GPUS_PER_NODE=4
705
+ NNODES=16
706
+
707
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
708
+ MASTER_PORT=6000
709
+ NODE_RANK=0
710
+
711
+ NHEADS=32
712
+ NHIDDEN=7168
713
+ NLAYERS=48
714
+ SEQ_LEN=1024
715
+ VOCAB_SIZE=50257
716
+
717
+ MICRO_BATCH_SIZE=16
718
+ PP_CHUNKS=4
719
+
720
+ PP_SIZE=16
721
+ DP_SIZE=2
722
+ TP_SIZE=2
723
+
724
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
725
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
726
+
727
+ # --micro-batch-size $MICRO_BATCH_SIZE \
728
+ # --lr-warmup-fraction .01 \
729
+ # --global-batch-size $GLOBAL_BATCH_SIZE
730
+ GPT_ARGS=" \
731
+ --num-layers $NLAYERS \
732
+ --hidden-size $NHIDDEN \
733
+ --num-attention-heads $NHEADS \
734
+ --seq-length $SEQ_LEN \
735
+ --max-position-embeddings $SEQ_LEN \
736
+ --batch-size $MICRO_BATCH_SIZE \
737
+ --train-iters 1000 \
738
+ --lr-decay-iters 800 \
739
+ --vocab-file $VOCAB_FILE \
740
+ --merge-file $MERGE_FILE \
741
+ --lr 1.5e-4 \
742
+ --lr-decay-style cosine \
743
+ --min-lr 1.0e-5 \
744
+ --weight-decay 1e-2 \
745
+ --clip-grad 1.0 \
746
+ --warmup 0.01 \
747
+ --fp16 \
748
+ "
749
+
750
+ OUTPUT_ARGS=" \
751
+ --log-interval 1 \
752
+ --save-interval 500 \
753
+ --eval-interval 100 \
754
+ --eval-iters 10 \
755
+ "
756
+
757
+ #ZeRO Configs
758
+ gradient_accumulation_steps=1
759
+ reduce_bucket_size=$(($NHIDDEN*$NHIDDEN))
760
+ stage3_prefetch_bucket_size=$(($NHIDDEN*$NHIDDEN*9/10))
761
+ stage3_param_persistence_threshold=$((10*$NHIDDEN))
762
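+ # (bucket/threshold sizes derived from NHIDDEN, following the usual ZeRO-3 tuning heuristics)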
+
763
+ # Here it is different from the other setup
764
+ train_batch_size=$(($WORLD_SIZE*$MICRO_BATCH_SIZE*$gradient_accumulation_steps))
765
+
766
+ config_json="./ds_zero_stage_3_config.json"
767
+
768
+ # "train_batch_size": $train_batch_size,
769
+
770
+ cat <<EOT > $config_json
771
+ {
772
+ "gradient_accumulation_steps": $gradient_accumulation_steps,
773
+ "steps_per_print": 10,
774
+ "zero_optimization": {
775
+ "stage": 3,
776
+ "stage3_max_live_parameters": 1e9,
777
+ "stage3_max_reuse_distance": 1e9,
778
+ "stage3_prefetch_bucket_size": $stage3_prefetch_bucket_size,
779
+ "stage3_param_persitence_threshold": $stage3_param_persistence_threshold,
780
+ "reduce_bucket_size": $reduce_bucket_size,
781
+ "contiguous_gradients": true
782
+ },
783
+ "gradient_clipping": 1.0,
784
+ "fp16": {
785
+ "enabled": true,
786
+ "loss_scale": 0,
787
+ "initial_scale_power": 10,
788
+ "loss_scale_window": 1000,
789
+ "hysteresis": 2,
790
+ "min_loss_scale": 1
791
+ },
792
+ "wall_clock_breakdown": false,
793
+ "zero_allow_untested_optimizer": false
794
+ }
795
+ EOT
796
+
797
+ MP_SIZE=$TP_SIZE
798
+
799
+ stage=3
800
+ reduce_scatter=true
801
+ contigious_gradients=true
802
+ rbs=50000000
803
+ agbs=5000000000
804
+
805
+ #Activation Checkpointing and Contigious Memory
806
+ chkp_layers=1
807
+ PA=true
808
+ PA_CPU=true
809
+ CC=true
810
+ SYNCHRONIZE=true
811
+ PROFILE=false
812
+
813
+ # TiledLinear splits, 0 is disable
814
+ TILED_LINEAR="false"
815
+ TILE_DIM=1
816
+
817
+
818
+ DEEPSPEED_ARGS=" \
819
+ --deepspeed \
820
+ --deepspeed_config ${config_json} \
821
+ --zero-stage ${stage} \
822
+ --zero-reduce-bucket-size ${rbs} \
823
+ --zero-allgather-bucket-size ${agbs} \
824
+ "
825
+
826
+ if [ "${contigious_gradients}" = "true" ]; then
827
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
828
+ --zero-contigious-gradients"
829
+ fi
830
+
831
+ if [ "${reduce_scatter}" = "true" ]; then
832
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
833
+ --zero-reduce-scatter"
834
+ fi
835
+
836
+ CHKP_ARGS=" \
837
+ --checkpoint-activations \
838
+ --deepspeed-activation-checkpointing \
839
+ --checkpoint-num-layers ${chkp_layers}"
840
+
841
+ if [ "${PA}" = "true" ]; then
842
+ CHKP_ARGS="${CHKP_ARGS} --partition-activations"
843
+ fi
844
+
845
+ if [ "${PA_CPU}" = "true" ]; then
846
+ CHKP_ARGS="${CHKP_ARGS} \
847
+ --checkpoint-in-cpu"
848
+ fi
849
+
850
+ if [ "${SYNCHRONIZE}" = "true" ]; then
851
+ CHKP_ARGS="${CHKP_ARGS} \
852
+ --synchronize-each-layer"
853
+ fi
854
+
855
+ if [ "${CC}" = "true" ]; then
856
+ CHKP_ARGS="${CHKP_ARGS} \
857
+ --contigious-checkpointing"
858
+ fi
859
+
860
+ if [ "${PROFILE}" = "true" ]; then
861
+ CHKP_ARGS="${CHKP_ARGS} \
862
+ --profile-backward"
863
+ fi
864
+
865
+ if [ "${TILED_LINEAR}" = "true" ]; then
866
+ tile_opt="${tile_opt} \
867
+ --memory-centric-tiled-linear \
868
+ --tile-factor=${TILE_DIM}"
869
+ fi
870
+
871
+ export LAUNCHER="python -u -m torch.distributed.launch \
872
+ --nproc_per_node $GPUS_PER_NODE \
873
+ --nnodes $NNODES \
874
+ --master_addr $MASTER_ADDR \
875
+ --master_port $MASTER_PORT \
876
+ "
877
+
878
+ # --tensor-model-parallel-size $TP_SIZE \
879
+ # --pipeline-model-parallel-size $PP_SIZE \
880
+ export CMD=" \
881
+ `pwd`/pretrain_gpt2.py \
882
+ --model-parallel-size $TP_SIZE \
883
+ $GPT_ARGS \
884
+ $OUTPUT_ARGS \
885
+ --save $SAVE_CHECKPOINT_PATH \
886
+ --load $SAVE_CHECKPOINT_PATH \
887
+ --data-path $DATA_PATH \
888
+ --data-impl mmap \
889
+ --split 949,50,1 \
890
+ --distributed-backend nccl \
891
+ $DEEPSPEED_ARGS \
892
+ $CHKP_ARGS \
893
+ "
894
+
895
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-meg-ds
896
+
897
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
898
+
899
+ ```
900
+
901
+ Stats:
902
+
903
+ ```
904
+ iteration 20/ 1000 | elapsed time per iteration (ms): 28716.0 | learning rate: 1.500E-04 | lm loss:
905
+ 2.324108E+01 | loss scale: 1024.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
906
+ time (ms) | forward: 5495.35 | backward: 22976.72 | backward-backward: 22976.69 |
907
+ backward-allreduce: 0.00 | optimizer: 243.03 | batch generator: 1.00 Effective Tera Flops per GPU:
908
+ 0.21 and total parameters 29.998 B
909
+ ```
910
+
911
+
912
+ ## Megatron + Deepspeed 3D Parallelism
913
+
914
+ **Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism` is not in sync with Megatron-LM master, so several config args don't match.
915
+
916
+ Status: Unoptimized
917
+
918
+ ### Nodes=16
919
+
920
+
921
+ ```
922
+ salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
923
+ ```
924
+
925
+
926
+ ```
927
+
928
+ cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism
929
+
930
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
931
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
932
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
933
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
934
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-meg-ds
935
+
936
+ GPUS_PER_NODE=4
937
+ NNODES=16
938
+
939
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
940
+ MASTER_PORT=6000
941
+ NODE_RANK=0
942
+
943
+ NHEADS=32
944
+ NHIDDEN=7168
945
+ NLAYERS=48
946
+ SEQ_LEN=1024
947
+ VOCAB_SIZE=50257
948
+
949
+ MICRO_BATCH_SIZE=1
950
+ PP_CHUNKS=4
951
+ GAS=$PP_CHUNKS
952
+
953
+ PP_SIZE=16
954
+ DP_SIZE=1
955
+ TP_SIZE=4
956
+
957
+ GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE))
958
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
959
+
960
+ # --micro-batch-size $MICRO_BATCH_SIZE \
961
+ # --lr-warmup-fraction .01 \
962
+ # --global-batch-size $GLOBAL_BATCH_SIZE
963
+ GPT_ARGS=" \
964
+ --num-layers $NLAYERS \
965
+ --hidden-size $NHIDDEN \
966
+ --num-attention-heads $NHEADS \
967
+ --seq-length $SEQ_LEN \
968
+ --max-position-embeddings $SEQ_LEN \
969
+ --batch-size $MICRO_BATCH_SIZE \
970
+ --gas $GAS \
971
+ --train-iters 1000 \
972
+ --lr-decay-iters 800 \
973
+ --vocab-file $VOCAB_FILE \
974
+ --merge-file $MERGE_FILE \
975
+ --lr 1.5e-4 \
976
+ --lr-decay-style cosine \
977
+ --min-lr 1.0e-5 \
978
+ --weight-decay 1e-2 \
979
+ --clip-grad 1.0 \
980
+ --warmup 0.01 \
981
+ --fp16 \
982
+ "
983
+
984
+ OUTPUT_ARGS=" \
985
+ --log-interval 10 \
986
+ --save-interval 500 \
987
+ --eval-interval 100 \
988
+ --eval-iters 10 \
989
+ "
990
+
991
+ #ZeRO Configs
992
+ gradient_accumulation_steps=1
993
+ reduce_bucket_size=$(($NHIDDEN*$NHIDDEN))
994
+ stage3_prefetch_bucket_size=$(($NHIDDEN*$NHIDDEN*9/10))
995
+ stage3_param_persistence_threshold=$((10*$NHIDDEN))
996
+ train_batch_size=$(($DP_SIZE*$MICRO_BATCH_SIZE*$gradient_accumulation_steps))
997
+
998
+ config_json="./ds_config.json"
999
+
1000
+ cat <<EOT > $config_json
1001
+ {
1002
+ "train_batch_size": $train_batch_size,
1003
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
1004
+ "gradient_accumulation_steps": $gradient_accumulation_steps,
1005
+ "steps_per_print": 10,
1006
+ "gradient_clipping": 1.0,
1007
+ "fp16": {
1008
+ "enabled": true,
1009
+ "loss_scale": 0,
1010
+ "initial_scale_power": 10,
1011
+ "loss_scale_window": 1000,
1012
+ "hysteresis": 2,
1013
+ "min_loss_scale": 1
1014
+ },
1015
+ "wall_clock_breakdown": false,
1016
+ "zero_allow_untested_optimizer": false
1017
+ }
1018
+ EOT
1019
+
1020
+ MP_SIZE=$TP_SIZE
1021
+
1022
+ stage=0
1023
+ reduce_scatter=true
1024
+ contigious_gradients=true
1025
+ rbs=50000000
1026
+ agbs=5000000000
1027
+
1028
+ #Activation Checkpointing and Contigious Memory
1029
+ chkp_layers=1
1030
+ PA=true
1031
+ PA_CPU=false
1032
+ CC=true
1033
+ SYNCHRONIZE=true
1034
+ PROFILE=false
1035
+
1036
+ DEEPSPEED_ARGS=" \
1037
+ --deepspeed \
1038
+ --deepspeed_config ${config_json} \
1039
+ --zero-stage ${stage} \
1040
+ --zero-reduce-bucket-size ${rbs} \
1041
+ --zero-allgather-bucket-size ${agbs} \
1042
+ "
1043
+
1044
+ DEEPSPEED_ARGS=" \
1045
+ --deepspeed \
1046
+ --deepspeed_config ${config_json} \
1047
+ --zero-stage ${stage} \
1048
+ --zero-reduce-bucket-size ${rbs} \
1049
+ --zero-allgather-bucket-size ${agbs} \
1050
+ "
1051
+
1052
+ if [ "${contigious_gradients}" = "true" ]; then
1053
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
1054
+ --zero-contigious-gradients"
1055
+ fi
1056
+
1057
+ if [ "${reduce_scatter}" = "true" ]; then
1058
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
1059
+ --zero-reduce-scatter"
1060
+ fi
1061
+
1062
+ CHKP_ARGS=" \
1063
+ --checkpoint-activations \
1064
+ --checkpoint-num-layers ${chkp_layers}"
1065
+
1066
+ if [ "${PA}" = "true" ]; then
1067
+ CHKP_ARGS="${CHKP_ARGS} \
1068
+ --partition-activations"
1069
+ fi
1070
+
1071
+ if [ "${PA_CPU}" = "true" ]; then
1072
+ CHKP_ARGS="${CHKP_ARGS} \
1073
+ --checkpoint-in-cpu"
1074
+ fi
1075
+
1076
+ if [ "${SYNCHRONIZE}" = "true" ]; then
1077
+ CHKP_ARGS="${CHKP_ARGS} \
1078
+ --synchronize-each-layer"
1079
+ fi
1080
+
1081
+ if [ "${CC}" = "true" ]; then
1082
+ CHKP_ARGS="${CHKP_ARGS} \
1083
+ --contigious-checkpointing"
1084
+ fi
1085
+
1086
+ if [ "${PROFILE}" = "true" ]; then
1087
+ CHKP_ARGS="${CHKP_ARGS} \
1088
+ --profile-backward"
1089
+ fi
1090
+
1091
+ export LAUNCHER="python -u -m torch.distributed.launch \
1092
+ --nproc_per_node $GPUS_PER_NODE \
1093
+ --nnodes $NNODES \
1094
+ --master_addr $MASTER_ADDR \
1095
+ --master_port $MASTER_PORT \
1096
+ "
1097
+
1098
+ # --tensor-model-parallel-size $TP_SIZE \
1099
+ # --pipeline-model-parallel-size $PP_SIZE \
1100
+ export CMD=" \
1101
+ `pwd`/pretrain_gpt2.py \
1102
+ --model-parallel-size $TP_SIZE \
1103
+ --pipe-parallel-size $PP_SIZE \
1104
+ $GPT_ARGS \
1105
+ $OUTPUT_ARGS \
1106
+ --save $SAVE_CHECKPOINT_PATH \
1107
+ --load $SAVE_CHECKPOINT_PATH \
1108
+ --data-path $DATA_PATH \
1109
+ --data-impl mmap \
1110
+ --split 949,50,1 \
1111
+ --distributed-backend nccl \
1112
+ $DEEPSPEED_ARGS \
1113
+ $CHKP_ARGS \
1114
+ "
1115
+
1116
+ rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-meg-ds
1117
+
1118
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
1119
+
1120
+ # can't figure out how to launch from salloc
1121
+ #
1122
+ # r10i5n[5-6],r10i6n[4-5,7-8],r10i7n[0,4-5],r11i3n[3-6],r13i1n[2-4]
1123
+ function makehostfile() {
1124
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
1125
+ $slots=4 if $slots==0; # workaround
1126
+ while ($ENV{"SLURM_JOB_NODELIST"} =~ m/(\w+)(?:\[([\d-,]+)\])?,?/msg) {
1127
+ $b=$1; $s=$2||q[""]; $s=~s/-/../g;
1128
+ print map { "$b$_ slots=$slots\n" } eval $s }'
1129
+ }
1130
+ makehostfile > hostfile
1131
+ #
1132
+ #
1133
+ # srun --jobid $SLURM_JOBID deepspeed -H `pwd`/hostfile --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} $CMD
1134
+ #
1135
+
1136
+ # to kill hanging python processes on all nodes at once
1137
+ # srun pkill python
1138
+
1139
+ ```
1140
+
1141
+ Stats:
1142
+ ```
1143
+ iteration 650/ 1000 | elapsed time per iteration (ms): 1210.1 | learning rate: 1.450E-05 | lm loss:
1144
+ 7.287670E+00 | loss scale: 8192.0 | number of skipped iterations: 0 | number of nan iterations: 0 |
1145
+ time (ms) | forward: 0.00 | backward: 0.00 | optimizer: 0.00 | batch generator: 0.00
1146
+
1147
+ ```
1148
+
1149
+ ```
1150
+ | N/A 50C P0 181W / 300W | 13236MiB / 32510MiB | 99% Default |
1151
+ | 0 N/A N/A 72371 C .../conda/hf-prod/bin/python 13233MiB |
1152
+ | 1 N/A N/A 72372 C .../conda/hf-prod/bin/python 13193MiB |
1153
+ | 2 N/A N/A 72373 C .../conda/hf-prod/bin/python 13161MiB |
1154
+ | 3 N/A N/A 72374 C .../conda/hf-prod/bin/python 13169MiB |
1155
+ ```
1156
+
1157
+ ## HF + Deepspeed ZeRO
1158
+
1159
+ ### Nodes=16 ZeRO-2
1160
+
1161
+
1162
+ ```
1163
+ salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
1164
+ ```
1165
+
1166
+ 32GB nodes
1167
+
1168
+ This works - at about 25GB/gpu - but is very slow: ~20s/it.
+
+ Model size: 3.5B
+
+ With a bigger model the 40GB/gpu limit gets exceeded and the processes get killed.
+
+ We don't have `zero.Init()` here, so the whole model gets loaded onto each process first - this can't scale.
+
+ This memory gets released afterwards, but we don't have enough of it to get past that initial hump.
1177
+
1178
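+ For reference, a minimal hypothetical sketch of what `zero.Init()` provides - under ZeRO-3, parameters created inside this context are partitioned across the ranks at construction time, so no single process has to materialize the full model (how to wire this into `run_clm.py` is the open question here):
+
+ ```
+ # hypothetical sketch, not part of the script below
+ import deepspeed
+ from transformers import GPT2Config, GPT2LMHeadModel
+
+ config = GPT2Config(n_embd=3072, n_head=32, n_layer=30, n_positions=1024)
+
+ # each rank only allocates its own shard of every parameter
+ with deepspeed.zero.Init():
+     model = GPT2LMHeadModel(config)
+ ```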
+ ```
1179
+
1180
+ # use custom PR branch to handle the model creation on the fly
1181
+ cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/
1182
+
1183
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
1184
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
1185
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
1186
+
1187
+ MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
1188
+ DATASET="stas/openwebtext-10k"
1189
+
1190
+ GPUS_PER_NODE=4
1191
+ NNODES=16
1192
+
1193
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
1194
+ MASTER_PORT=6000
1195
+
1196
+ NHEADS=32
1197
+ NHIDDEN=3072
1198
+ NLAYERS=30
1199
+ SEQ_LEN=1024
1200
+ VOCAB_SIZE=50257
1201
+
1202
+ export LAUNCHER="python -u -m torch.distributed.launch \
1203
+ --nproc_per_node $GPUS_PER_NODE \
1204
+ --nnodes $NNODES \
1205
+ --master_addr $MASTER_ADDR \
1206
+ --master_port $MASTER_PORT \
1207
+ "
1208
+
1209
+
1210
+ config_json="./ds_z2_no_offload.json"
1211
+ cat <<EOT > $config_json
1212
+ {
1213
+ "fp16": {
1214
+ "enabled": "auto",
1215
+ "loss_scale": 0,
1216
+ "loss_scale_window": 1000,
1217
+ "initial_scale_power": 16,
1218
+ "hysteresis": 2,
1219
+ "min_loss_scale": 1
1220
+ },
1221
+
1222
+ "optimizer": {
1223
+ "type": "AdamW",
1224
+ "params": {
1225
+ "lr": "auto",
1226
+ "betas": "auto",
1227
+ "eps": "auto",
1228
+ "weight_decay": "auto"
1229
+ }
1230
+ },
1231
+
1232
+ "scheduler": {
1233
+ "type": "WarmupLR",
1234
+ "params": {
1235
+ "warmup_min_lr": "auto",
1236
+ "warmup_max_lr": "auto",
1237
+ "warmup_num_steps": "auto"
1238
+ }
1239
+ },
1240
+
1241
+ "zero_optimization": {
1242
+ "stage": 2,
1243
+ "allgather_partitions": true,
1244
+ "allgather_bucket_size": 2e8,
1245
+ "overlap_comm": true,
1246
+ "reduce_scatter": true,
1247
+ "reduce_bucket_size": 2e8,
1248
+ "contiguous_gradients": true,
1249
+ "cpu_offload": true
1250
+ },
1251
+
1252
+ "gradient_accumulation_steps": "auto",
1253
+ "gradient_clipping": "auto",
1254
+ "steps_per_print": 2000,
1255
+ "train_batch_size": "auto",
1256
+ "train_micro_batch_size_per_gpu": "auto",
1257
+ "wall_clock_breakdown": false
1258
+ }
1259
+ EOT
1260
+
1261
+ export PYTHONPATH=src
1262
+ export HF_DATASETS_OFFLINE=1
1263
+ export TRANSFORMERS_OFFLINE=1
1264
+ export USE_TF=0
1265
+
1266
+ # deepspeed -H `pwd`/hostfile-exp2 --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \
1267
+ export CMD=" \
1268
+ examples/pytorch/language-modeling/run_clm.py \
1269
+ --model_type gpt2 \
1270
+ --tokenizer_name gpt2 \
1271
+ --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \
1272
+ --dataset_name $DATASET \
1273
+ --output_dir output_dir \
1274
+ --overwrite_output_dir \
1275
+ --do_train \
1276
+ --do_eval \
1277
+ --max_train_samples 10000 \
1278
+ --max_eval_samples 1000 \
1279
+ --per_device_train_batch_size 4 \
1280
+ --per_device_eval_batch_size 4 \
1281
+ --num_train_epochs 1 \
1282
+ --warmup_steps 8 \
1283
+ --fp16 \
1284
+ --report_to none \
1285
+ --deepspeed $config_json \
1286
+ "
1287
+
1288
+ # model size
1289
+ python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
1290
+
1291
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
1292
+
1293
+
1294
+ ```
1295
+
1296
+ Stats:
1297
+
1298
+ ```
1299
+
1300
+
1301
+ ```
1302
+
1303
+
1304
+ ### Nodes=16 ZeRO-3 + CPU Offload
1305
+
1306
+ 32GB nodes
1307
+
1308
+ Model size: 7B
1309
+
1310
+
1311
+ ```
1312
+
1313
+ # use custom PR branch to handle the model creation on the fly
1314
+ cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/
1315
+
1316
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
1317
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
1318
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
1319
+
1320
+ MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
1321
+ DATASET="stas/openwebtext-10k"
1322
+
1323
+ GPUS_PER_NODE=4
1324
+ NNODES=2
1325
+
1326
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
1327
+ MASTER_PORT=6000
1328
+
1329
+ NHEADS=32
1330
+ NHIDDEN=1024
1331
+ NLAYERS=10
1332
+ SEQ_LEN=1024
1333
+ VOCAB_SIZE=50257
1334
+
1335
+ export LAUNCHER="python -u -m torch.distributed.launch \
1336
+ --nproc_per_node $GPUS_PER_NODE \
1337
+ --nnodes $NNODES \
1338
+ --master_addr $MASTER_ADDR \
1339
+ --master_port $MASTER_PORT \
1340
+ "
1341
+
1342
+
1343
+ config_json="./ds_z3_cpu_offload.json"
1344
+ cat <<EOT > $config_json
1345
+ {
1346
+ "fp16": {
1347
+ "enabled": "auto",
1348
+ "loss_scale": 0,
1349
+ "loss_scale_window": 1000,
1350
+ "initial_scale_power": 16,
1351
+ "hysteresis": 2,
1352
+ "min_loss_scale": 1
1353
+ },
1354
+
1355
+ "optimizer": {
1356
+ "type": "AdamW",
1357
+ "params": {
1358
+ "lr": "auto",
1359
+ "betas": "auto",
1360
+ "eps": "auto",
1361
+ "weight_decay": "auto"
1362
+ }
1363
+ },
1364
+
1365
+ "scheduler": {
1366
+ "type": "WarmupLR",
1367
+ "params": {
1368
+ "warmup_min_lr": "auto",
1369
+ "warmup_max_lr": "auto",
1370
+ "warmup_num_steps": "auto"
1371
+ }
1372
+ },
1373
+
1374
+ "zero_optimization": {
1375
+ "stage": 3,
1376
+ "offload_optimizer": {
1377
+ "device": "cpu",
1378
+ "pin_memory": true
1379
+ },
1380
+ "offload_param": {
1381
+ "device": "cpu",
1382
+ "pin_memory": true
1383
+ },
1384
+ "overlap_comm": true,
1385
+ "contiguous_gradients": true,
1386
+ "sub_group_size": 1e14,
1387
+ "reduce_bucket_size": "auto",
1388
+ "stage3_prefetch_bucket_size": "auto",
1389
+ "stage3_param_persistence_threshold": "auto",
1390
+ "stage3_max_live_parameters": 1e9,
1391
+ "stage3_max_reuse_distance": 1e9,
1392
+ "stage3_gather_fp16_weights_on_model_save": true
1393
+ },
1394
+
1395
+ "gradient_accumulation_steps": "auto",
1396
+ "gradient_clipping": "auto",
1397
+ "steps_per_print": 2000,
1398
+ "train_batch_size": "auto",
1399
+ "train_micro_batch_size_per_gpu": "auto",
1400
+ "wall_clock_breakdown": false
1401
+ }
1402
+ EOT
1403
+
1404
+ export PYTHONPATH=src
1405
+ export HF_DATASETS_OFFLINE=1
1406
+ export TRANSFORMERS_OFFLINE=1
1407
+ export USE_TF=0
1408
+
1409
+ # deepspeed -H `pwd`/hostfile-exp2 --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \
1410
+ export CMD=" \
1411
+ examples/pytorch/language-modeling/run_clm.py \
1412
+ --model_type gpt2 \
1413
+ --tokenizer_name gpt2 \
1414
+ --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \
1415
+ --dataset_name $DATASET \
1416
+ --output_dir output_dir \
1417
+ --overwrite_output_dir \
1418
+ --do_train \
1419
+ --do_eval \
1420
+ --max_train_samples 10000 \
1421
+ --max_eval_samples 1000 \
1422
+ --per_device_train_batch_size 4 \
1423
+ --per_device_eval_batch_size 4 \
1424
+ --num_train_epochs 1 \
1425
+ --warmup_steps 8 \
1426
+ --fp16 \
1427
+ --report_to none \
1428
+ --deepspeed $config_json \
1429
+ "
1430
+
1431
+ # model size
1432
+ python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
1433
+
1434
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD'
1435
+
1436
+ ```
1437
+
1438
+
1439
+ Stats:
1440
+
1441
+ ```
1442
+
1443
+ ```
1444
+
1445
+
1446
+ ### Trying deepspeed launcher again
1447
+
1448
+
1449
+ ```
1450
+
1451
+ #!/bin/bash
1452
+ #SBATCH --job-name=hf_ds_gpt2_multi_node_test
1453
+ #SBATCH --nodes=2
1454
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
1455
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
1456
+ #SBATCH --hint=nomultithread # we get physical cores not logical
1457
+ #SBATCH --gres=gpu:4 # number of gpus
1458
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
1459
+ #SBATCH --output=%x-%j.out # output file name
1460
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
1461
+ #SBATCH --account=six@gpu
1462
+
1463
+ # use custom PR branch to handle the model creation on the fly
1464
+ cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/
1465
+
1466
+ source $six_ALL_CCFRWORK/start-prod
1467
+
1468
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
1469
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
1470
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
1471
+
1472
+ MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
1473
+ DATASET="stas/openwebtext-10k"
1474
+
1475
+ GPUS_PER_NODE=4
1476
+ NNODES=2
1477
+
1478
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
1479
+ MASTER_PORT=6000
1480
+
1481
+ NHEADS=32
1482
+ NHIDDEN=1024
1483
+ NLAYERS=10
1484
+ SEQ_LEN=1024
1485
+ VOCAB_SIZE=50257
1486
+
1487
+ export LAUNCHER="python -u -m torch.distributed.launch \
1488
+ --nproc_per_node $GPUS_PER_NODE \
1489
+ --nnodes $NNODES \
1490
+ --master_addr $MASTER_ADDR \
1491
+ --master_port $MASTER_PORT \
1492
+ "
1493
+
1494
+
1495
+ config_json="./ds_z3_cpu_offload.json"
1496
+ cat <<EOT > $config_json
1497
+ {
1498
+ "fp16": {
1499
+ "enabled": "auto",
1500
+ "loss_scale": 0,
1501
+ "loss_scale_window": 1000,
1502
+ "initial_scale_power": 16,
1503
+ "hysteresis": 2,
1504
+ "min_loss_scale": 1
1505
+ },
1506
+
1507
+ "optimizer": {
1508
+ "type": "AdamW",
1509
+ "params": {
1510
+ "lr": "auto",
1511
+ "betas": "auto",
1512
+ "eps": "auto",
1513
+ "weight_decay": "auto"
1514
+ }
1515
+ },
1516
+
1517
+ "scheduler": {
1518
+ "type": "WarmupLR",
1519
+ "params": {
1520
+ "warmup_min_lr": "auto",
1521
+ "warmup_max_lr": "auto",
1522
+ "warmup_num_steps": "auto"
1523
+ }
1524
+ },
1525
+
1526
+ "zero_optimization": {
1527
+ "stage": 3,
1528
+ "offload_optimizer": {
1529
+ "device": "cpu",
1530
+ "pin_memory": true
1531
+ },
1532
+ "offload_param": {
1533
+ "device": "cpu",
1534
+ "pin_memory": true
1535
+ },
1536
+ "overlap_comm": true,
1537
+ "contiguous_gradients": true,
1538
+ "sub_group_size": 1e14,
1539
+ "reduce_bucket_size": "auto",
1540
+ "stage3_prefetch_bucket_size": "auto",
1541
+ "stage3_param_persistence_threshold": "auto",
1542
+ "stage3_max_live_parameters": 1e9,
1543
+ "stage3_max_reuse_distance": 1e9,
1544
+ "stage3_gather_fp16_weights_on_model_save": true
1545
+ },
1546
+
1547
+ "gradient_accumulation_steps": "auto",
1548
+ "gradient_clipping": "auto",
1549
+ "steps_per_print": 2000,
1550
+ "train_batch_size": "auto",
1551
+ "train_micro_batch_size_per_gpu": "auto",
1552
+ "wall_clock_breakdown": false
1553
+ }
1554
+ EOT
1555
+
1556
+ export PYTHONPATH=src
1557
+ export HF_DATASETS_OFFLINE=1
1558
+ export TRANSFORMERS_OFFLINE=1
1559
+ export USE_TF=0
1560
+
1561
+ export CMD=" \
1562
+ deepspeed --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \
1563
+ examples/pytorch/language-modeling/run_clm.py \
1564
+ --model_type gpt2 \
1565
+ --tokenizer_name gpt2 \
1566
+ --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \
1567
+ --dataset_name $DATASET \
1568
+ --output_dir output_dir \
1569
+ --overwrite_output_dir \
1570
+ --do_train \
1571
+ --do_eval \
1572
+ --max_train_samples 10000 \
1573
+ --max_eval_samples 1000 \
1574
+ --per_device_train_batch_size 4 \
1575
+ --per_device_eval_batch_size 4 \
1576
+ --num_train_epochs 1 \
1577
+ --warmup_steps 8 \
1578
+ --fp16 \
1579
+ --report_to none \
1580
+ --deepspeed $config_json \
1581
+ "
1582
+
1583
+ # model size
1584
+ python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')"
1585
+
1586
+ #srun --jobid $SLURM_JOBID bash -c '$CMD'
1587
+ srun --jobid $SLURM_JOBID bash -c '$CMD'
1588
+
1589
+
1590
+
1591
+ ```
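+
+ For reference, here is what the model-size one-liner in the script above evaluates to for this tiny test config (a quick sanity check in python, not part of the original script):
+
+ ```
+ # same formula as the one-liner in the slurm script above
+ h, l, s, v = 1024, 10, 1024, 50257   # NHIDDEN, NLAYERS, SEQ_LEN, VOCAB_SIZE
+ params = l * (12 * h**2 + 13 * h) + v * h + s * h
+ print(f"{params / 10**9:.2f}B")      # ~0.18B, so the script's ":.0f" formatting prints "0B"
+ ```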
experiments/gpt2-meg-ds-3d-old/meg_ds_3d_gpt2_perf_n16.out ADDED
The diff for this file is too large to render. See raw diff
 
experiments/gpt2.md ADDED
@@ -0,0 +1,592 @@
1
+ # GPT2 Experiments
2
+
3
+ Scripts and logs of GPT2 experiments on Jean Zay HPC.
4
+
5
+ Using 4x V100 32GB nodes.
6
+
7
+ (add `-C v100-32g` for 32gb nodes.)
8
+
9
+ ## Apples and Oranges
10
+
11
+ JZ seems to give us inconsistent performance - each allocation may deliver performance that varies by as much as 40%, so the numbers in the summaries of this document are very hard to compare. We thought it had to do with the proximity of the allocated nodes, but it proved to vary randomly through the day, most likely depending heavily on the traffic on the JZ network.
12
+
13
+ Therefore any results you will find in this summary are only accurate to within +/-40%. An identical test scored 40% faster or slower on the same allocation at different times of the day.
14
+
15
+ ## Megatron-LM
16
+
17
+ Constants:
18
+
19
+ - `TP_SIZE` = tensor parallel
20
+ - `PP_SIZE` = pipeline parallel
21
+ - `DP_SIZE` = data parallel, derived automatically from `WORLD_SIZE / (TP_SIZE * PP_SIZE)` (see the small example below)
22
+ - `WORLD_SIZE` = total number of GPUs
23
+
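+ As a quick illustration of how `DP_SIZE` falls out of the other constants (a minimal sketch; the numbers are just an example):
+
+ ```
+ WORLD_SIZE = 64           # e.g. 16 nodes x 4 gpus
+ TP_SIZE, PP_SIZE = 4, 16  # tensor- and pipeline-parallel degrees
+ assert WORLD_SIZE % (TP_SIZE * PP_SIZE) == 0
+ DP_SIZE = WORLD_SIZE // (TP_SIZE * PP_SIZE)
+ print(DP_SIZE)            # 1
+ ```
+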
24
+ According to the Megatron-LM paper the highest degree of TP we can use is 4 for 4-gpu nodes - crossing nodes would slow things down a lot. So max `TP_SIZE=4`, meaning the full 4-gpu node is used only for the tensor parallel dimension.
25
+
26
+ ## Metrics
27
+
28
+ TFlops: `model_size_in_B * 4 * 2 * seq * global_batch_size / (time_in_sec_per_iteration * total_gpus * 1e3)`
29
+
30
+ The factor of 4 applies when activation checkpointing is used,
31
+ otherwise it is 3 - but for a 200B model, activation checkpointing will always be on.
32
+
33
+ The peak of a V100 32GB GPU is about 125 TFlops/sec [spec](https://images.nvidia.com/content/technologies/volta/pdf/volta-v100-datasheet-update-us-1165301-r5.pdf). But we cannot get the peak. The max achievable performance will be 30-60 TFlops depending on the model size. So if you see low 20s, the model is not tuned well; if you see over 100, there is a bug in the calculation.
34
+
35
+ For v100 16gb gpus the max spec is 120 TFlops/sec.
36
+
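+ The same formula as a small python helper (a sketch mirroring the formula above; the example numbers are from one of the 64-gpu 52B rows below):
+
+ ```
+ def tflops(model_size_b, seq, global_bs, secs_per_iter, n_gpus, ckpt_factor=4):
+     # ckpt_factor=4 with activation checkpointing, 3 without
+     return model_size_b * ckpt_factor * 2 * seq * global_bs / (secs_per_iter * n_gpus * 1e3)
+
+ print(round(tflops(52, 1024, 1024, 125, 64), 1))  # ~54.5
+ ```
+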
37
+ ## Allocation
38
+
39
+ ```
40
+ salloc --constraint=v100-32g --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
41
+ ```
42
+
43
+
44
+ ### Megatron
45
+
46
+ The full slurm scripts and log files are at [`gpt2-meg`](./gpt2-meg):
47
+ - scripts starting with `meg_gpt2_base_` are for getting the baseline with tiny BS
48
+ - scripts starting with `meg_gpt2_perf_` are for smaller model, and tuned for high performance
49
+
50
+ Not yet optimized with NVIDIA team!
51
+
52
+ Metrics can be calculated in bash after figuring out the throughput (in seconds):
53
+
54
+ ```
55
+ THROUGHPUT=122
56
+ NNODES=16
57
+ MSIZE=52
58
+ MICRO_BATCH_SIZE=4
59
+ DP_SIZE=1
60
+ PP_CHUNKS=256
61
+ echo "($MSIZE*4*2*1024*$MICRO_BATCH_SIZE*$DP_SIZE*$PP_CHUNKS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l
62
+ 55.86675409836065573770
63
+ ```
64
+
65
+ **Max model size**
66
+
67
+ These first results are all about how big a model can be fit into the given hardware at the smallest batch size, disregarding throughput.
68
+
69
+ 16GB nodes:
70
+
71
+ | GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops |
72
+ | ---: | ---: | -: | -: | --------: | -----: | -----: | -----: | -----: |
73
+ | 16 | 7.5B | 1 | 4 | 4 | 1 | 4 | 0.661s | 23.2 |
74
+ | 64 | 30B | 1 | 16 | 4 | 1 | 4 | 1.439s | 10.7 |
75
+ | 128 | 50B | 1 | 32 | 4 | 1 | 4 | 2.124s | 6.0 |
76
+ | 256 | 78B | 1 | 64 | 4 | 1 | 4 | 2.953s | 3.4 |
77
+ | 256 | 22B | 4 | 16 | 4 | 1 | 4 | 1.826s | 1.5 |
78
+ | | | | | | | | | |
79
+
80
+ 32GB nodes:
81
+
82
+ | GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops |
83
+ | ---: | ---: | -: | -: | --------: | -----: | -----: | -----: | -----: |
84
+ | 16 | 18B | 1 | 4 | 4 | 1 | 4 | 1.381s | 26.7 |
85
+ | 32 | 30B | 1 | 8 | 4 | 1 | 4 | 1.618s | 19.0 |
86
+ | 64 | 65B | 1 | 16 | 4 | 1 | 4 | 2.738s | 12.2 |
87
+ | 128 | 116B | 1 | 32 | 4 | 1 | 4 | 4.234s | 7.0 |
88
+ | 256 | 206B | 1 | 64 | 4 | 1 | 4 | 6.736s | 3.9 |
89
+ | | | | | | | | | |
90
+
91
+ The TFlops are very low because there are too few PP chunks/micro-batches (4) (gradient accumulation size / GAS), so the pipeline bubble adds a lot of overhead. Increasing the number of PP chunks should dramatically improve performance, but it also requires lowering the max model size to leave memory to hold those chunks.
92
+
93
+ **Performance**
94
+
95
+ These experiments try a smaller model size, but aim for much higher TFlops performance.
96
+
97
+ | GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops | Notes |
98
+ | ---: | ---: | -: | -: | --------: | -----: | -----: | ----: | -----: | ----: |
99
+ | 16 | 18B | 1 | 8 | 64 | 4 | 256 | 90.5s | 26.1 | 05-26 |
100
+ | 16 | 18B | 1 | 8 | 128 | 4 | 512 | 177s | 26.7 | 05-26 |
101
+ | 16 | 18B | 1 | 8 | 256 | 4 | 1024 | 356s | 26.5 | 05-26 |
102
+ | | | | | | | | | | |
103
+ | 16 | 18B | 1 | 4 | 128 | 4 | 512 | 179s | 26.4 | 05-26 |
104
+ | 16 | 18B | 1 | 4 | 128 | 6 | 768 | 262s | 27.0 | 05-26 |
105
+ | 16 | 18B | 1 | 8 | 128 | 6 | 768 | 259s | 27.3 | 05-26 |
106
+ | 16 | 18B | 1 | 8 | 32 | 8 | 256 | 89s | 26.5 | 05-26 |
107
+ | | | | | | | | | | |
108
+ | 32 | 39B | 1 | 8 | 128 | 4 | 512 | 82s | 62.3 | 05-26 |
109
+ | 32 | 39B | 1 | 8 | 128 | 6 | 768 | 123s | 62.3 | 05-26 |
110
+ | 32 | 39B | 1 | 8 | 256 | 6 | 1536 | 241s | 63.6 | 05-26 |
111
+ | 32 | 39B | 1 | 8 | 512 | 6 | 3072 | 478s | 64.2 | 05-26 |
112
+ | | | | | | | | | | |
113
+ | 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 129s | 52.8 | 05-25 |
114
+ | 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 217s | 31.4 | 05-26 |
115
+ | 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 125s | 54.5 | 05-27 |
116
+ | 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 225s | 30.3 | 05-28 |
117
+ | | | | | | | | | | |
118
+ | 64 | 52B | 1 | 16 | 256 | 6 | 1536 | 328s | 31.2 | 05-26 |
119
+ | 64 | 52B | 1 | 16 | 256 | 8 | 2048 | 435s | 31.3 | 05-26 |
120
+ | 64 | 52B | 1 | 16 | 512 | 6 | 3072 | 650s | 31.5 | 05-26 |
121
+ | 64 | 52B | 1 | 16 | 512 | 8 | 4096 | 870s | 31.3 | 05-26 |
122
+ | 64 | 52B | 1 | 32 | 256 | 4 | 1024 | 220s | 31.0 | 05-26 |
123
+ | | | | | | | | | | |
124
+
125
+
126
+ data:
127
+ - Size = Model Size
128
+ - `TP=4` in all of entries
129
+ - Speed is time per iteration - to complete global batch size
130
+ - Global batch size is `micro-batch-size * pp_chunks * dp_size`
131
+ - PP chunks is the number of PP stages, so each pipeline handles `micro-batch-size * pp_chunks`
132
+ - Seq length is 1024
133
+
134
+ notes:
135
+ - 32 gpus had a very snug fit for gpu memory with the 39B model (the others were at ~75%), so it might be a bit too risky - it's borderline OOM
136
+
137
+
138
+
139
+
140
+ #### Megatron + Deepspeed 3D (new branch)
141
+
142
+
143
+ Why:
144
+
145
+ 1. More generic pipeline API that is not hard-coded into the model
146
+ 2. Better memory efficiency - needs less GPU memory, so can probably work with fewer pipeline stages
147
+ 3. Works with ZeRO-Offload so can significantly reduce the GPUs required for fine-tuning once the model is pre-trained, making it accessible to a lot more folks, who don't have access to hundreds of GPUs.
148
+
149
+ How:
150
+
151
+
152
+ This is new branch synced with Megatron
153
+
154
+ DeepSpeed branch: https://github.com/ShadenSmith/DeepSpeed/tree/megatron2.4-3d
155
+ Megatron branch: https://github.com/jeffra/DSE/tree/megatron-2.4-ds-pipe
156
+
157
+ This script can now launch Meg alone or Meg + Deepspeed 3D (ignore the zero options, they don't work yet):
158
+ https://github.com/jeffra/DSE/blob/megatron-2.4-ds-pipe/run.sh
159
+
160
+ ```
161
+ git clone https://github.com/ShadenSmith/DeepSpeed/ deepspeed-shaden
162
+ cd deepspeed-shaden
163
+ git checkout megatron2.4-3d
164
+ ```
165
+
166
+ ```
167
+ git clone https://github.com/jeffra/DSE megator-jeffra
168
+ cd megator-jeffra
169
+ git checkout megatron-2.4-ds-pipe
170
+ ```
171
+
172
+ See scripts and logs under [gpt2-meg-ds-3d](./gpt2-meg-ds-3d).
173
+
174
+ Now we use the same code-base for training w/ and w/o DS/3D - so can use a shared results table.
175
+ Also added memory usage columns.
176
+
177
+
178
+ | GPUs | Size | DS | GPU M | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes |
179
+ | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: |
180
+ | 64 | 52B | Y | 26GB | 1 | 16 | 256 | 4 | 1024 | 137s | 46.7 | 06-10 |
181
+ | 64 | 52B | Y | 29GB | 1 | 16 | 256 | 4 | 1536 | 206s | 49.6 | 06-10 |
182
+ | 64 | 52B | Y | 32GB | 1 | 16 | 256 | 4 | 2048 | 270s | 50.5 | 06-10 |
183
+ | 64 | 52B | Y | 26GB | 1 | 16 | 1024 | 4 | 4096 | 544s | 50.1 | 06-10 |
184
+ | | | | | | | | | | | | |
185
+ | | | | | | | | | | | | |
186
+ | 64 | 52B | N | 32GB | 1 | 16 | 256 | 4 | 1024 | 126s | 54.1 | 06-10 |
187
+ | | | | | | | | | | | | |
188
+
189
+
190
+
191
+
192
+ ```
193
+ perl -le '$ng=64; $ms=52; $gbs=1024; $sp=146; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)'
194
+ ```
195
+
196
+ - DS: Deepspeed/3D enabled
197
+ - GPU memory: rounded up per GPU
198
+ - MBS: Micro BS
199
+ - GBS: Global BS = GAS * MBS * DP_SIZE
200
+ - GAS: Gradient Accumulation Steps (= MBS pipe stages, = PP chunks)
201
+
202
+ Resident CPU memory remained at about 3GB per GPU.
203
+
204
+
205
+ **zero_stage:1 + reduce_bucket_size**
206
+
207
+ also added `--partition-activations`
208
+
209
+ (`meg_ds_3d_gpt2_perf_n16_z1_try*.slurm`)
210
+
211
+ | GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes |
212
+ | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: |
213
+ | 64 | 52B | Y | 5e8 | 2 | 8 | 128 | 4 | 1024 | 137s | 48.8 | 07-10 |
214
+ | 64 | 52B | Y | 1e9 | 2 | 8 | 128 | 4 | 1024 | 141s | 48.3 | 07-10 |
215
+ | 64 | 52B | Y | 2e9 | 2 | 8 | 128 | 4 | 1024 | 141s | 48.3 | 07-10 |
216
+ | | | | | | | | | | | | |
217
+
218
+ Note: since PP*TP=8*4=32 and there are 64 GPUs, DP=2
219
+
220
+
221
+ ------------
222
+ Experiment 1:
223
+ TP=4, DP=2, PP=8, gas=256, DS_ZeRO Stage 1, PA=disabled,reduce_bucket_size=2e8,5e8, mbs=2,3,
224
+
225
+
226
+ | ID | GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes |
227
+ | --: | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: |
228
+ | 1.1 | 64 | 52B | Y | 2e8 | 2 | 8 | 256 | 2 | 1024 | 150s | 45.4 | 07-10 |
229
+ | 1.2 | 64 | 52B | Y | 5e8 | 2 | 8 | 256 | 2 | 1024 | 150s | 45.4 | 07-10 |
230
+ | 1.3 | 64 | 52B | Y | 2e8 | 2 | 8 | 256 | 3 | 1536 | 213 | 48.0 | 07-10 |
231
+ | 1.4 | 64 | 52B | Y | 5e8 | 2 | 8 | 256 | 3 | 1536 | 208 | 49.1 | 07-10 |
232
+ | | | | | | | | | | | | | |
233
+
234
+
235
+ ------------
236
+
237
+ Experiment 2: HD=8192, NUM_LAYERs=48 (MSIZE=39)
238
+
239
+ Megatron+DeepSpeed:
240
+ - USE_DEEPSPEED=1, MSIZE=39, TP=4, PP=8, DP=2, ZeRO Stage 1, mbs=4, PA=disabled, reduce_bucket_size=2e8, gas=128
241
+ - USE_DEEPSPEED=1, MSIZE=39, TP=4, PP=8, DP=2, ZeRO Stage 1, mbs=4, PA=disabled, reduce_bucket_size=5e8, gas=128
242
+
243
+ Megatron Alone (which ever of the following runs better)
244
+ - USE_DEEPSPEED=0, MSIZE=39, TP=4, PP=16, DP=1, mbs=4, gas=256
245
+ - USE_DEEPSPEED=0, MSIZE=39, TP=4, PP =8, DP=2, mbs=4, gas=128
246
+
247
+
248
+ | ID | GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes |
249
+ | --: | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: |
250
+ | 2.1 | 64 | 39B | Y | 2e8 | 2 | 8 | 128 | 4 | 1024 | 104s | 49.1 | 07-10 |
251
+ | 2.2 | 64 | 39B | Y | 5e8 | 2 | 8 | 128 | 4 | 1024 | 105s | 48.7 | 07-10 |
252
+ | 2.3 | 64 | 39B | N | na | 1 | 8 | 256 | 4 | 1024 | 109s | 46.9 | 07-10 |
253
+ | 2.4 | 64 | 39B | N | na | 2 | 8 | 128 | 4 | 1024 | 110s | 46.5 | 07-10 |
254
+ | | | | | | | | | | | | | |
255
+
256
+
257
+
258
+
259
+
260
+
261
+ ------------
262
+
263
+ note: I also did tests on 1 node - getting almost identical results for Meg w/ and w/o DS/3D. So the network is to blame for all the fluctuations.
264
+
265
+ ```
266
+ NNODES=1
267
+ PP_SIZE=1
268
+ TP_SIZE=4
269
+ MICRO_BATCH_SIZE=4
270
+ PP_CHUNKS=16 # GAS
271
+ MSIZE=4
272
+ ```
273
+
274
+ got an average over 22 iterations in msecs (too short for good stats)
275
+
276
+ ```
277
+ ds 6875.05
278
+ meg 6896.20
279
+ ```
280
+ but it's obvious they are pretty similar.
281
+
282
+
283
+ **Save-checkpoint speed measurement**
284
+
285
+ | Nodes | MSize | Time (ms) |
286
+ | ----: | ----: | -------: |
287
+ | 8 | 25B | 17960.68 |
288
+ | | | |
289
+ | 16 | 52B | 19298.14 |
290
+ | 32 | 52B | 19228.38 |
291
+ | 64 | 52B | 19652.80 |
292
+ | | | |
293
+ | 32 | 97B | 19417.09 |
294
+ | 64 | 97B | 11525.99 |
295
+ | | | |
296
+ | 64 | 181B | 19495.31 |
297
+ | | | |
298
+
299
+
300
+ Currently it saves everything, not just model weights.
301
+
302
+ The biggest test was for 181B model, 64 nodes, 256 gpus, and a total 2.4TB per checkpoint.
303
+
304
+ The breakdown is:
305
+
306
+ 1. 0.34TB in PP layer states, 1.4GB per file per gpu (1.4*256) - this one looks like 2bytes per param
307
+ 2. 2.00TB in optimizer states, 8.0GB per file per gpu (8*256) - this one looks like 12bytes per param
308
+
309
+ The data sizes are correspondingly:
310
+
311
+ 1. 2 bytes per param for fp16 weights
312
+ 2. 12 bytes per param: 8 bytes for the optimizer states and 4 bytes for the fp32 model weights (see the quick check below)
313
+
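+ A quick check that these per-param byte counts line up with the observed ~2.4TB for the 181B run (a back-of-the-envelope sketch):
+
+ ```
+ params = 181e9
+ fp16_weights = params * 2    # ~0.36TB, matches the ~0.34TB of PP layer states
+ optim_states = params * 12   # ~2.2TB, matches the ~2.0TB of optimizer state files
+ print(f"{(fp16_weights + optim_states) / 1e12:.1f}TB")  # ~2.5TB, close to the observed total
+ ```
+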
314
+ To keep many of these checkpoints we should copy away only the fp16 weights and overwrite the full checkpoint - otherwise a lot more disk space will be needed.
315
+
316
+ Important: also remember that `$six_ALL_CCFRSCRATCH` files that don't get accessed in 30 days get auto-deleted, so the important checkpoints need to be backed up (probably tar'ed and put on `$six_ALL_CCFRSTORE`).
317
+
318
+
319
+
320
+ ### Megatron + Deepspeed 3D (old branch)
321
+
322
+
323
+ **Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism` is not in sync with M-LM master - so several config args don't match. It's about 8 months old.
324
+
325
+ See scripts and logs under [gpt2-meg-ds-3d-old](./gpt2-meg-ds-3d-old).
326
+
327
+ Uses 3D:
328
+ - TP: tensor parallelism
329
+ - PP: pipeline parallelism
330
+ - DP: data parallelism
331
+
332
+ same features as Megatron's native, but improved by Deepspeed
333
+
334
+ **Performance**
335
+
336
+ | GPUs | Size | DP | PP | PP chunks | Mic-BS | Glob-BS | Speed | TFlops | Notes |
337
+ | ---: | ---: | -: | -: | --------: | -----: | ------: | ----: | -----: | ----: |
338
+ | 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 146s | 46.7 | 05-27 |
339
+ | | | | | | | | | | |
340
+
341
+
342
+ - GAS = Gradient Accumulation size (same as PP_chunks / number of PP stages)
343
+ - Global_bs = pp_chunks*micro_bs*dp_size
344
+ - `TP_SIZE=4` (size of the node)
345
+
346
+ ```
347
+ perl -le '$ng=64; $ms=52; $gbs=1024; $sp=146; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)'
348
+ ```
349
+
350
+
351
+
352
+
353
+
354
+
355
+ #### Megatron + Deepspeed ZeRO (old branch)
356
+
357
+
358
+ **Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3` is not in sync with M-LM master - so several config args don't match. It's about 8 months old.
359
+
360
+ See scripts and logs under [gpt2-meg-ds-zero](./gpt2-meg-ds-zero).
361
+
362
+ This one uses only TP from Megatron (no PP)
363
+
364
+ Not yet optimized with Deepspeed team!
365
+
366
+ **With Offload off**
367
+
368
+ **Performance**
369
+ | GPUs | Size | DP | Mic-BS | Glob-BS | Speed | TFlops | Notes |
370
+ | ---: | ----: | -: | ---: | -----: | ----: | -----: | ----: |
371
+ | 64 | 52B | 16 | 48 | 768 | 122s | 41.9 | 05-25 |
372
+ | 64 | 52B | 16 | 48 | 768 | 127s | 40.3 | 05-27 |
373
+ | | | | | | | | |
374
+
375
+
376
+ ```
377
+ perl -le '$ng=64; $ms=52; $gbs=768; $sp=122; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)'
378
+ ```
379
+ - Seq length is 1024
380
+ - `TP=4` in all of entries
381
+ - `DP` is number of nodes here
382
+ - Speed is time per iteration - to complete global batch size
383
+ - Global batch size is `micro-batch-size * dp-size`
384
+
385
+ - tried w/ and w/o Tiling once but saw no difference - perhaps would be more important on larger collections
386
+
387
+ | GPUs | Size | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | Notes |
388
+ | ---: | ---: | -: | -: | -----: | ------: | ----: | -----: | ----: |
389
+ | 64 | 52B | 4 | 16 | 48 | 768 | 127s | 40.3 | |
390
+ | 64 | 52B | 2 | 32 | 32 | 1024 | 167s | 40.8 | |
391
+ | 64 | 52B | 1 | 64 | 16 | 1024 | 184s | 37.0 | |
392
+ | 64 | 24B | 1 | 64 | 16 | 1024 | 89.0s | 35.3 | |
393
+ | 64 | 24B | 2 | 32 | 32 | 1024 | 85.7s | 36.7 | |
394
+
395
+
396
+ **With full cpu offload**
397
+
398
+ | GPUs | Size | TP | DP | Mic-BS | Glob-BS | Speed | TFlops |
399
+ | ---: | ---: | -: | -: | -----: | ------: | ----: | -----: |
400
+ | 64 | 52B | 4 | 16 | 64 | 1024 | 171s | 39.9 |
401
+ | | | | | | | | |
402
+
403
+
404
+ Olatunji requested the following experiments:
405
+
406
+ - enabled/set: `--split-transformers --checkpoint-num-layers=2`
407
+ - removed: `--synchronize-each-layer --contigious-checkpointing`
408
+
409
+ | ID | GPUs | Size | ScatEmb | TP | DP | Mic-BS | Glob-BS | Speed | TFlops |
410
+ | -: | ---: | ---: | ------: | -: | -: | -----: | ------: | ----: | -----: |
411
+ | 1 | 64 | 52B | N | 4 | 16 | 48 | 768 | 119s | 43.0 |
412
+ | 2 | 64 | 52B | Y | 4 | 16 | 48 | 768 | 115s | 44.5 |
413
+ | 3 | 64 | 52B | Y | 4 | 16 | 52 | 832 | 124s | 44.7 |
414
+ | 4 | 64 | 52B | N | 2 | 32 | 32 | 1024 | 159s | 42.9 |
415
+ | 5 | 64 | 52B | Y | 2 | 32 | 32 | 1024 | 158s | 43.1 |
416
+ | 6 | 64 | 52B | Y | 2 | 32 | 36 | 1152 | 176s | 43.6 |
417
+ | 7 | 64 | 52B | Y | 4 | 16 | 56 | 896 | 161s | 37.0 |
418
+ | 8 | 64 | 52B | Y | 2 | 32 | 38 | 1216 | 178s | 45.5 |
419
+ | 9 | 64 | 52B | Y | 1 | 64 | 18 | 1152 | 197s | 38.9 |
420
+ | 10 | 64 | 52B | Y | 1 | 64 | 20 | 1280 | 219s | 38.9 |
421
+ | 11 | 64 | 52B | Y | 1 | 64 | 22 | 1408 | OOM | |
422
+ | | | | | | | | | | |
423
+
424
+
425
+ following 2:
426
+ from ID 8:
427
+ - removed `--checkpoint-in-cpu`
428
+ - changed values
429
+
430
+ | ID | GPUs | Size | ScatEmb | TP | DP | Mic-BS | Glob-BS | Speed | TFlops |
431
+ | -: | ---: | ---: | ------: | -: | -: | -----: | ------: | ----: | -----: |
432
+ | 12 | 64 | 52B | Y | 4 | 16 | 24 | 384 | 72s | 35.5 |
433
+ | 13 | 64 | 52B | Y | 2 | 32 | 16 | 512 | 79s | 38.3 |
434
+ | | | | | | | | | | |
435
+
436
+
437
+ following 4:
438
+ from ID 12:
439
+ - removed `--split-transformers`
440
+ - changed values
441
+ - toggled `--checkpoint-in-cpu` (PA_CPU column)
442
+
443
+ | ID | GPUs | Size | ScatEmb | PA_CPU | TP | DP | Mic-BS | Glob-BS | Speed | TFlops |
444
+ | -: | ---: | ---: | ------: | -----: | -: | -: | -----: | ------: | ----: | -----: |
445
+ | 14 | 64 | 52B | Y | N | 4 | 16 | 24 | 384 | 72s | 35.5 |
446
+ | 15 | 64 | 52B | Y | Y | 4 | 16 | 24 | 384 | 71s | 36.0 |
447
+ | 16 | 64 | 52B | Y | N | 2 | 32 | 16 | 512 | 87s | 39.2 |
448
+ | 17 | 64 | 52B | Y | Y | 2 | 32 | 16 | 512 | 88s | 38.7 |
449
+ | | | | | | | | | | | |
450
+
451
+
452
+
453
+
454
+
455
+
456
+ ### HF + Deepspeed Zero 3 + Full Offload
457
+
458
+ See scripts and logs under [gpt2-hf-ds](./gpt2-hf-ds).
459
+
460
+ Not yet optimized with Deepspeed team!
461
+
462
+ **Max model size**
463
+
464
+ | GPUs | Size | Mic-BS | Glob-BS | Speed | TFlops |
465
+ | ---: | ----: | -----: | ------: | ----: | -----: |
466
+ | 16 | 25B | 4 | 64 | 58s | 14.0 |
467
+ | 32 | 52B | 4 | 128 | 114s | 14.9 |
468
+ | 64 | 97B | 4 | 256 | 222s | 14.3 |
469
+ | | | | | | |
470
+
471
+
472
+ **Performance**
473
+
474
+ | GPUs | Size | Zero | Opt Offl | Par Offl | Mic-BS | Glob-BS | Speed | TFlops | Notes |
475
+ | ---: | ----: | --: | -------: | -------: | -----: | ------: | ----: | -----: | ----: |
476
+ | 64 | 52B | 3 | N | N | 8 | 512 | 139s | 24.5 | 05-25 |
477
+ | 64 | 52B | 3 | N | N | 4 | 256 | 185s | 9.2 | 05-27 |
478
+ | 64 | 52B | 3 | N | N | 8 | 512 | 118s | 28.9 | 05-27 |
479
+ | | | | | | | | | | |
480
+ | 64 | 52B | 3 | N | N | 8 | 512 | 117s | 29.1 | 05-28 |
481
+ | 64 | 52B | 3 | N | N | 6 | 384 | 111s | 23.0 | 05-28 |
482
+ | 64 | 52B | 3 | N | N | 10 | 640 | 150s | 28.4 | 05-28 |
483
+ | 64 | 52B | 3 | Y | N | 12 | 768 | 183s | 27.9 | 05-28 |
484
+ | 64 | 52B | 3 | Y | N | 12 | 768 | 175s | 29.2 | 05-28 |
485
+ | 64 | 52B | 3 | Y | Y | 12 | 768 | 177s | 28.9 | 05-28 |
486
+ | | | | | | | | | | |
487
+ | 64 | 52B | 2 | Y | N | | | OOM | | 05-28 |
488
+ | | | | | | | | | | |
489
+
490
+
491
+ - DP=GPUs
492
+ - global bs = micro bs * DP
493
+ - Speed reported by the HF Trainer metrics is `samples_per_second` - so the Speed column (seconds per iteration) in the table is `glob_bs/samples_per_second`, as in the small example below
494
+
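+ For example, converting the trainer's `samples_per_second` back into the Speed column (a tiny sketch; the samples/sec value here is just illustrative):
+
+ ```
+ glob_bs = 512
+ samples_per_second = 4.34   # illustrative value as reported by the HF Trainer
+ print(f"{glob_bs / samples_per_second:.0f}s")  # ~118s, i.e. the Speed column
+ ```
+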
495
+ notes:
496
+ - gradient checkpointing activated
497
+
498
+
499
+ ```
500
+ perl -le '$ng=64; $ms=52; $gbs=512; $sp=139.52; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)'
501
+ 22
502
+ ```
503
+
504
+ A model of this size can't be fit into this setup with ZeRO-2 at all - even at BS=4 it keeps getting killed by cgroups - i.e. it's asking for more than 40GB of general RAM per gpu. Same story w/ or w/o offload.
505
+
506
+
507
+ ## Magic scripts
508
+
509
+ - Calculate the TFlops:
510
+
511
+ ```
512
+ perl -le '$ng=64; $ms=52; $gbs=1024; $sp=127; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)'
513
+ ```
514
+ (ng = total gpus, ms = model size in B, gbs = global batch size, sp = throughput in seconds)
515
+
516
+ same with bash env vars and broken down GBS into mbs*dp*gas (gas=pp_chunks):
517
+ ```
518
+ echo "($MSIZE*4*2*1024*$MICRO_BATCH_SIZE*$DP_SIZE*$PP_CHUNKS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l
519
+ ```
520
+
521
+ - Automatically process slurm/megatron log files and average the throughput (prints 'fail' when the training failed w/o producing a single iteration stat):
522
+ ```
523
+ find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { print "$ARGV " . ($c ? int($x/$c/1000) : "fail")}' {} \; | sort | grep -v fail
524
+ ```
525
+
526
+
527
+ - re-generate tflops column in the tables above:
528
+ ```
529
+ perl -ne 's#^(\| +(\d+) +\| +(\d+)B.*? +(\d+) +\| +([\d\.]+)s) +\| +[\d\.]+ +(.*?)$#"$1 | ".sprintf("%.01f", $3*4*2*1024*$4 / ($5*$2*1e3))." $6"#e && print ' gpt2.md
530
+ ```
531
+
532
+ I originally had a mistake in the model size calculation script - it has been fixed in the tables and the scripts, but many logs still have the old formula - I used G `(2**30)` instead of B `(10**9)`, so the model size was getting reported as smaller than it really is.
533
+
534
+ Now it's the correct version:
535
+ ```
536
+ NHIDDEN=4096
537
+ NLAYERS=36
538
+ SEQ_LEN=512
539
+ VOCAB_SIZE=50257
540
+ python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B')"
541
+ ```
542
+
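+ The size of that mistake, for the record (a quick check):
+
+ ```
+ print(2**30 / 10**9)  # ~1.074, i.e. sizes computed with G=2**30 came out ~7% smaller
+ ```
+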
543
+ - misc file renames
544
+
545
+
546
+ ```
547
+ # rename both .sh and .out based on GAS (PP_CHUNKS) value inside
548
+ # 61B-megatron-mbs-2-pp16-dp-1.sh -> 61B-megatron-mbs-2-pp16-dp-1-gas128.sh
549
+ perl -lne 'm|PP_CHUNKS=(\d+)| && do {$gas=$1; $q = chr(39); $ARGV=~s|\.sh$||; print qq[rename.pl ${q}s|dp-(\\d)|dp-\$1-gas-$gas|$q $ARGV*] }' *sh > run-renames.sh
550
+ sh ./run-renames.sh
551
+ ```
552
+
553
+ - A formula to match the script name to the log file, by rewriting the `job-name`:
554
+ ```
555
+ perl -pi -e '$ARGV=~s|\.sh$||; s|#SBATCH --job-name=.*|#SBATCH --job-name=$ARGV|' *slurm *sh
556
+ ```
557
+ now the log file will match the slurm file.
558
+
559
+ - change runtime:
560
+ ```
561
+ perl -pi -e '$ARGV=~s|\.sh$||; s|#SBATCH --time=.*|#SBATCH --time=00:20:00|' *slurm *sh
562
+ ```
563
+
564
+ - calculate speed + tflops from filename and averaging `elapsed time per iteration` from the log - including failed runs (needs the `-gas-` file rename from above)
565
+
566
+ ```
567
+ find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { $sp=$c ? int($x/$c/1000) : 0; $d=qr/(\d+)/; $ARGV=~m|${d}B-.*?-mbs-$d-pp$d-dp-$d-gas-$d| && do {$ng=64; $ms=$1; $gbs=$2*$4*$5; $tf=$sp ? sprintf "%0.1f", $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3) : 0}; $r = $sp ? "$ARGV $sp $tf" : "$ARGV fail"; print $r}' {} \; | sort -nk3 -r
568
+ ./61B-megatron-mbs-2-pp16-dp-1-gas-512-200977.out 144 55.5
569
+ ./55B-megatron-mbs-2-pp16-dp-1-gas-512-200968.out 134 53.8
570
+ ./55B-ds-zero0-mbs-2-pp16-dp-1-gas-512-200964.out 141 51.1
571
+ ./55B-ds-zero0-mbs-4-pp16-dp-1-gas-256-200965.out 145 49.7
572
+ ./55B-megatron-mbs-4-pp16-dp-1-gas-256-200970.out 149 48.4
573
+ ./61B-ds-zero0-mbs-4-pp16-dp-1-gas-256-200973.out 166 48.2
574
+ ./61B-ds-zero0-mbs-2-pp16-dp-1-gas-512-200972.out 169 47.3
575
+ ./61B-megatron-mbs-4-pp16-dp-1-gas-256-200979.out 172 46.5
576
+ ./61B-megatron-mbs-4-pp8-dp-2-gas-128-200980.out fail
577
+ ./61B-megatron-mbs-2-pp8-dp-2-gas-256-200978.out fail
578
+ ./61B-ds-zero1-mbs-4-pp8-dp-2-gas-128-200976.out fail
579
+ ./61B-ds-zero1-mbs-2-pp8-dp-2-gas-256-200974.out fail
580
+ ./55B-megatron-mbs-4-pp8-dp-2-gas-128-200971.out fail
581
+ ./55B-megatron-mbs-2-pp8-dp-2-gas-256-200969.out fail
582
+ ./55B-ds-zero1-mbs-4-pp8-dp-2-gas-128-200967.out fail
583
+ ./55B-ds-zero1-mbs-2-pp8-dp-2-gas-256-200966.out fail
584
+ ```
585
+
586
+ - same as above but with finer control over which files are processed and preserving their run order, e.g. sorted by latest run:
587
+ ```
588
+ ls -1t 61*out | xargs -n1 perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { $sp=$c ? int($x/$c/1000) : 0; $d=qr/(\d+)/; $ARGV=~m|${d}B-.*?-mbs-$d-pp$d-dp-$d-gas-$d| && do {$ng=64; $ms=$1; $gbs=$2*$4*$5; $tf=$sp ? sprintf "%0.1f", $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3) : 0}; $r = $sp ? "$ARGV $sp $tf" : "$ARGV fail"; print $r}'
589
+ 61B-ds-zero1-mbs-2-pp16-dp-1-gas-512.18488.out 196 40.8
590
+ 61B-megatron-mbs-2-pp16-dp-1-gas-512.8189.out 176 45.4
591
+ 61B-ds-zero1-mbs-2-pp16-dp-1-gas-512.17709.out 194 41.2
592
+ ```
experiments/lm-harness-evaluation.md ADDED
@@ -0,0 +1,29 @@
1
+ # LM Harness Evaluation
2
+
3
+ The evaluation harness from EleutherAI is integrated as a submodule. We use a fork on [HF's Github](https://github.com/huggingface/lm-evaluation-harness).
4
+ To initialize the submodule, run:
5
+ ```bash
6
+ git submodule init
7
+ git submodule update
8
+ ```
9
+
10
+ Make sure you have the requirements in `lm-evaluation-harness`:
11
+ ```bash
12
+ cd lm-evaluation-harness
13
+ pip install -r requirements.txt
14
+ ```
15
+
16
+ To launch an evaluation, run:
17
+ ```bash
18
+ python lm-evaluation-harness/main.py \
19
+ --model gpt2 \
20
+ --model_args pretrained=gpt2-xl \
21
+ --tasks cola,mrpc,rte,qnli,qqp,sst,boolq,cb,copa,multirc,record,wic,wsc,coqa,drop,lambada,lambada_cloze,piqa,pubmedqa,sciq \
22
+ --provide_description \ # Whether to provide the task description
23
+ --num_fewshot 3 \ # Number of priming pairs
24
+ --batch_size 2 \
25
+ --output_path eval-gpt2-xl
26
+ ```
27
+
28
+ Please note:
29
+ - As of now, only single GPU is supported in `lm-evaluation-harness`.
experiments/performance.md ADDED
@@ -0,0 +1,7 @@
1
+ # Performance
2
+
3
+ ## Network
4
+
5
+ The state of the network can hugely impact the performance of the training - to the tune of 40% difference in throughput.
6
+
7
+ When making slurm allocations, use `--contiguous` to request nodes that are close to each other. Unless the nodes were reserved ahead of time by the admins, such a constraint may add a huge delay before the request is granted.
experiments/tr8-104B.md ADDED
@@ -0,0 +1,103 @@
1
+ # Train 8 104B wide tune up
2
+
3
+ note: this tune-up table is somewhat invalid since during the tune-up a mistake was made in `FFN_HIDDEN_SIZE`, which was incorrectly set to a much lower value, so the tests below were really testing a 58B model. So the TFlops numbers in this section are incorrect (bigger than they are in reality). I'm not sure how to fix it, since I don't think the formula applies when the model is lopsided. The numbers in the sections afterwards are correct.
4
+
5
+ The misconfiguration error has been fixed later in the experiments.
6
+
7
+ ```
8
+ NLAYERS=32
9
+ NHIDDEN=16384
10
+ NHEADS=32
11
+ SEQ_LEN=2048
12
+ VOCAB_SIZE=50257
13
+ ```
14
+
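+ Plugging this config into the usual model-size formula confirms the intended size (a quick python check; it assumes the standard `FFN_HIDDEN_SIZE = 4 * NHIDDEN`, which is exactly what the misconfiguration above got wrong):
+
+ ```
+ h, l, s, v = 16384, 32, 2048, 50257
+ params = l * (12 * h**2 + 13 * h) + v * h + s * h
+ print(f"{params / 10**9:.0f}B")  # 104B
+ ```
+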
15
+ BS=1024, SIZE=104B,
16
+
17
+ | NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes |
18
+ | -----: | --: | --: | --: | --: | ----: | -----: | --------------------: |
19
+ | 32 | 4 | 32 | 1 | 1 | 256 | 54.5 | 31.5GB |
20
+ | 64 | 4 | 64 | 1 | 1 | 155 | 55.0 | 24GB |
21
+ | | | | | | | | |
22
+
23
+ ```
24
+ perl -le '$ng=32*4; $sp=256; $ms=104; $gbs=1048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
25
+ perl -le '$ng=64*4; $sp=155; $ms=104; $gbs=1048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
26
+ ```
27
+
28
+ (ng = total gpus, ms = model size in B, gbs = global batch size, sp = throughput in seconds)
29
+
30
+ BS=2048
31
+
32
+
33
+ | NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes |
34
+ | ----: | --: | --: | --: | --: | ----: | -----: | --------------------: |
35
+ | 32 | 4 | 32 | 1 | 1 | 586 | 46.52 | GB |
36
+ | 64 | 4 | 64 | 1 | 1 | 301 | 45.28 | 25GB |
37
+ | | | | | | | | |
38
+
39
+
40
+ ```
41
+ perl -le '$ng=32*4; $sp=586; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
42
+ perl -le '$ng=64*4; $sp=301; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
43
+ ```
44
+
45
+
46
+
47
+ e.g. interactive tuning on 32 nodes
48
+
49
+ ```
50
+ salloc --account=six@gpu --constraint=v100-32g --nodes=32 --ntasks=32 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=3:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
51
+ ```
52
+
53
+
54
+
55
+
56
+ ## BNB
57
+
58
+ w/ `--use-bnb-optimizer`
59
+
60
+ | NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes |
61
+ | ----: | --: | --: | --: | --: | ----: | -----: | --------------------: |
62
+ | 32 | 4 | 16 | 2 | 1 | 681 | 40.0 | 31GB |
63
+ | 32 | 2 | 32 | 2 | 1 | 633 | 43.0 | 31GB |
64
+ | 32 | 1 | 64 | 2 | 1 | | | 32GB OOMs |
65
+ | 32 | 4 | 32 | 1 | 1 | 688 | 39.6 | 27GB (same conf as normal 104B) |
66
+ | | | | | | | | |
67
+
68
+ ```
69
+ perl -le '$ng=32*4; $sp=633; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
70
+ ```
71
+
72
+ To ensure we are comparing apples to apples, I tried to use the same allocation when re-testing the baseline (but I'm not sure I get the same nodes every time).
73
+
74
+ The baseline of 104B experiment w/o `--use-bnb-optimizer` that we have been using for all experiments
75
+
76
+ using the `main` branch:
77
+
78
+ | NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes |
79
+ | ----: | --: | --: | --: | --: | ----: | -----: | --------------------: |
80
+ | 32 | 4 | 32 | 1 | 1 | 696 | 39.17 | 30GB (same conf as normal 104B) |
81
+ | | | | | | | | |
82
+
83
+ using the old `big-science` branch
84
+
85
+ | NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes |
86
+ | ----: | --: | --: | --: | --: | ----: | -----: | --------------------: |
87
+ | 32 | 4 | 32 | 1 | 1 | 706 | 38.6 | 30GB (same conf as normal 104B) |
88
+ | | | | | | | | |
89
+
90
+
91
+
92
+ ## A100s
93
+
94
+ GPUS_PER_NODE=8
95
+ NNODES=16
96
+
97
+
98
+ TP_SIZE=4 # always fixed to the size of a single node
99
+ PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here
100
+ MICRO_BATCH_SIZE=1
101
+ GLOBAL_BATCH_SIZE=2048
102
+
103
+ TFLOPs: 72.72-82 (was speeding up - so very inconclusive)
finetune/README.md ADDED
@@ -0,0 +1,26 @@
1
+ # Finetuning
2
+
3
+ Notes on the plans to do finetuning with the pre-trained model
4
+
5
+ # Large Model on smaller hardware setup
6
+
7
+ - fine-tuning a 150-200B model with fewer GPUs than the pre-training setup
8
+
9
+ ## a. Fine-Tuning requiring only the model weights from the pre-training and uninitialized optimizer states
10
+
11
+
12
+ Solution: This can also be done using ZeRO-Infinity
13
+
14
+ Hardware Requirements: This would require about 2.5-5 TB of aggregate memory for 100-200B model. It can be either CPU memory or NVMe memory, and it can be within a single node or across nodes. A single node server with enough CPU or NVMe can work, if speed is not an issue.
15
+
16
+ Estimated Work: We can do this with ZeRO-Infinity. Seems like @Shaden Smith already has the code to load the model parameters checkpoints from Megatron+DeepSpeed 3D to Megatron+ DeepSpeed ZeRO-Infinity.
17
+
18
+ ## b. Continued-Training requiring both the model weights and optimizer states after pre-training
19
+
20
+ Solution: This can be done using Megatron+DeepSpeed 3D with ZeRO CPU Offload.
21
+
22
+ Hardware Requirements: This option will require 2-4 TB of aggregate CPU memory to store the optimizer states and 600-1200GB of aggregate GPU memory to store parameters, gradients and activations for 100-200B model.
23
+
24
+ This reduces the number of GPUs required by 4x. Will run on 32-64 GPUs on 4-8x nodes with 8xV100, 768GB RAM.
25
+
26
+ Estimated work: The current code already supports it.
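+
+ A rough memory budget behind these numbers, assuming standard mixed-precision Adam (2 bytes/param each for fp16 weights and gradients on GPU, ~12 bytes/param of optimizer states + fp32 master weights offloaded to CPU) - a back-of-the-envelope sketch, not a measurement:
+
+ ```
+ def budget_tb(params_b):
+     gpu = params_b * (2 + 2) / 1e3  # fp16 params + fp16 grads, in TB (activations come on top)
+     cpu = params_b * 12 / 1e3       # fp32 master weights + Adam momentum/variance, in TB
+     return gpu, cpu
+
+ for size in (100, 200):
+     gpu, cpu = budget_tb(size)
+     print(f"{size}B: ~{gpu:.1f}TB GPU (params+grads), ~{cpu:.1f}TB CPU (optimizer states)")
+ # 100B: ~0.4TB GPU / ~1.2TB CPU; 200B: ~0.8TB GPU / ~2.4TB CPU - in line with the ranges above
+ ```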
jz/archs/enwiki.md ADDED
@@ -0,0 +1,415 @@
1
+
2
+
3
+ # GPT2 Comparisons on EnWiki
4
+
5
+ This is a backup copy of the work-in-progress notes from when this effort started out using Enwiki.
6
+
7
+ It's currently not being kept up to date.
8
+
9
+ For now we have moved to openwebtext, so the main README.md doc is now using that.
10
+
11
+ ## SLURM
12
+
13
+
14
+ 1 nodes / 4 gpus:
15
+
16
+ ```
17
+ srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:4 --hint=nomultithread --time=60 bash
18
+ ```
19
+
20
+
21
+
22
+ ## Data
23
+
24
+
25
+
26
+ ### Enwiki
27
+
28
+ data prep https://github.com/NVIDIA/Megatron-LM#collecting-wikipedia-training-data
29
+
30
+ Megatron-LM's training is based on enwiki.
31
+ It's a huge dataset, but it's not needed for a sample run - see the short sample below
32
+ ```
33
+ wget https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2
34
+ pip install git+https://github.com/attardi/wikiextractor
35
+ wikiextractor --json enwiki-latest-pages-articles.xml.bz2
36
+ ```
37
+
38
+
39
+ short sample
40
+ ```
41
+ cd data
42
+ wget https://dumps.wikimedia.org/enwiki/20210501/enwiki-20210501-pages-articles-multistream1.xml-p1p41242.bz2
43
+ wikiextractor --json enwiki-20210501-pages-articles-multistream1.xml-p1p41242.bz2
44
+ mv text text-short
45
+ cd -
46
+ python tools/preprocess_data.py \
47
+ --input data/text-short/AD/wiki_29 \
48
+ --output-prefix my-gpt2 \
49
+ --vocab data/gpt2-vocab.json \
50
+ --dataset-impl mmap \
51
+ --tokenizer-type GPT2BPETokenizer \
52
+ --merge-file data/gpt2-merges.txt \
53
+ --append-eod
54
+ ```
55
+
56
+ ### OpenWebText
57
+
58
+ Using OpenWebText https://huggingface.co/datasets/openwebtext
59
+
60
+ ```
61
+ from datasets import load_dataset
62
+ dataset = load_dataset("openwebtext", split='train')
63
+ dataset = load_dataset("stas/openwebtext-10k", split='train')
64
+ ```
65
+
66
+ Ready datasets:
67
+
68
+ 1. HF datasets use:
69
+
70
+ * `openwebtext` - 8M records `--dataset_name "openwebtext"`
71
+ * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"`
72
+
73
+ 2. Jsonlines (derived):
74
+
75
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl`
76
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl`
77
+
78
+ 3. Megatron-preprocessed datasets (derived):
79
+
80
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2_*` (still churning)
81
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_*`
82
+
83
+
84
+ #### How the above was done
85
+
86
+ To convert to jsonlines for Megatron
87
+
88
+ run on a beefy cpu instance (but firewalled), e.g.:
89
+ ```
90
+ srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:0 --hint=nomultithread --time=60 bash
91
+ ```
92
+
93
+ small
94
+ ```
95
+ mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k
96
+ cd $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k
97
+ $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py -10k
98
+ ```
99
+
100
+ full (needs lots of RAM)
101
+ ```
102
+ mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext
103
+ cd $six_ALL_CCFRWORK/datasets-custom/openwebtext
104
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py
105
+ ```
106
+
107
+
108
+
109
+ To prep for megatron 10k-sample
110
+ ```
111
+ cd $six_ALL_CCFRWORK/code/megatron-lm
112
+ python tools/preprocess_data.py \
113
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \
114
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2 \
115
+ --vocab data/gpt2-vocab.json \
116
+ --dataset-impl mmap \
117
+ --tokenizer-type GPT2BPETokenizer \
118
+ --merge-file data/gpt2-merges.txt \
119
+ --append-eod
120
+ ```
121
+
122
+ To prep for megatron full dataset
123
+ ```
124
+ cd $six_ALL_CCFRWORK/code/megatron-lm
125
+ python tools/preprocess_data.py \
126
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \
127
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2 \
128
+ --vocab data/gpt2-vocab.json \
129
+ --dataset-impl mmap \
130
+ --tokenizer-type GPT2BPETokenizer \
131
+ --merge-file data/gpt2-merges.txt \
132
+ --append-eod
133
+ ```
134
+ as it should take about 11h to convert, use the `gpt2/jsonl-to-meg.slurm` job to complete it
135
+
136
+
137
+
138
+ ## Model
139
+
140
+
141
+ ### HF transformers model prep
142
+
143
+
144
+ prep the HF model - it's not available on the hub
145
+
146
+ 1. Download nvidia checkpoint:
147
+ ```
148
+ wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
149
+ ```
150
+
151
+ 2. Convert:
152
+ ```
153
+ python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_lm_345m_v0.0.zip
154
+ ```
155
+
156
+ 3. Fetch missing files
157
+ ```
158
+ git clone https://huggingface.co/nvidia/megatron-gpt2-345m/
159
+ ```
160
+
161
+ 4. Move the converted files into the cloned model dir
162
+ ```
163
+ mv config.json pytorch_model.bin megatron-gpt2-345m/
164
+ ```
165
+
166
+ 5. megatron-gpt2-345m dir should now have all the files which can be passed as `--model_name_or_path megatron-gpt2-345m`
167
+
168
+
169
+ XXX: maybe we will use some small samples for testing - need .txt and .json for megatron-lm
170
+
171
+ ```
172
+ #--train_file {data_dir}/sample_text.txt \
173
+ #--validation_file {data_dir}/sample_text.txt \
174
+ ```
175
+
176
+
177
+ ## Training
178
+
179
+ ### Megatron-LM
180
+
181
+ running native https://github.com/NVIDIA/Megatron-LM
182
+
183
+ ### finetuning on a single GPU
184
+
185
+
186
+ adding `--finetune` to work with an existing checkpoint
187
+ ```
188
+ CHECKPOINT_PATH=checkpoints/megatron_lm_345m_v0.0/release
189
+ SAVE_CHECKPOINT_PATH=data/checkpoints
190
+ VOCAB_FILE=data/gpt2-vocab.json
191
+ MERGE_FILE=data/gpt2-merges.txt
192
+ DATA_PATH=my-gpt2_text_document
193
+
194
+ # --train-samples 200 \
195
+ # --lr-decay-samples 150 \
196
+ # --train-iters 100000 \
197
+ # --lr-decay-iters 320000 \
198
+ GPT_ARGS="--num-layers 24 \
199
+ --hidden-size 1024 \
200
+ --num-attention-heads 16 \
201
+ --seq-length 1024 \
202
+ --max-position-embeddings 1024 \
203
+ --micro-batch-size 4 \
204
+ --global-batch-size 8 \
205
+ --lr 0.00015 \
206
+ --lr-decay-style cosine \
207
+ --vocab-file $VOCAB_FILE \
208
+ --merge-file $MERGE_FILE \
209
+ --lr-warmup-fraction .01 \
210
+ --finetune \
211
+ --train-iters 1000 \
212
+ --lr-decay-iters 800 \
213
+ --fp16"
214
+
215
+ OUTPUT_ARGS="--log-interval 10 \
216
+ --save-interval 500 \
217
+ --eval-interval 100 \
218
+ --eval-iters 10 \
219
+ --checkpoint-activations"
220
+
221
+ python pretrain_gpt.py \
222
+ $GPT_ARGS \
223
+ $OUTPUT_ARGS \
224
+ --save $SAVE_CHECKPOINT_PATH \
225
+ --load $CHECKPOINT_PATH \
226
+ --data-path $DATA_PATH
227
+ ```
228
+
229
+
230
+ ### finetune distributed with MP
231
+
232
+
233
+ ```
234
+ OUTPUT_ARGS="--log-interval 10 \
235
+ --save-interval 500 \
236
+ --eval-interval 100 \
237
+ --eval-iters 10 \
238
+ --checkpoint-activations"
239
+
240
+ VOCAB_FILE=data/gpt2-vocab.json
241
+ MERGE_FILE=data/gpt2-merges.txt
242
+ DATA_PATH=my-gpt2_text_document
243
+ CHECKPOINT_PATH=checkpoints/megatron_lm_345m_v0.0/release
244
+ SAVE_CHECKPOINT_PATH=data/checkpoints
245
+
246
+ GPUS_PER_NODE=4
247
+ NNODES=1
248
+
249
+ #Change for multinode config
250
+
251
+ MASTER_ADDR=localhost
252
+ MASTER_PORT=6000
253
+ NODE_RANK=0
254
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
255
+
256
+ DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
257
+
258
+ # --train-iters 100000 \
259
+ # --lr-decay-iters 320000 \
260
+
261
+ python -m torch.distributed.launch \
262
+ $DISTRIBUTED_ARGS \
263
+ pretrain_gpt.py \
264
+ --tensor-model-parallel-size 2 \
265
+ --pipeline-model-parallel-size 2 \
266
+ --num-layers 24 \
267
+ --hidden-size 1024 \
268
+ --num-attention-heads 16 \
269
+ --micro-batch-size 4 \
270
+ --global-batch-size 16 \
271
+ --seq-length 1024 \
272
+ --max-position-embeddings 1024 \
273
+ --save $SAVE_CHECKPOINT_PATH \
274
+ --load $CHECKPOINT_PATH \
275
+ --data-path $DATA_PATH \
276
+ --vocab-file $VOCAB_FILE \
277
+ --merge-file $MERGE_FILE \
278
+ --data-impl mmap \
279
+ --split 949,50,1 \
280
+ --distributed-backend nccl \
281
+ --lr 0.00015 \
282
+ --lr-decay-style cosine \
283
+ --min-lr 1.0e-5 \
284
+ --weight-decay 1e-2 \
285
+ --clip-grad 1.0 \
286
+ --lr-warmup-fraction .01 \
287
+ $OUTPUT_ARGS \
288
+ --train-samples 5000 \
289
+ --lr-decay-samples 4000 \
290
+ --finetune \
291
+ --fp16
292
+ ```
293
+
294
+
295
+ ### stats ###
296
+
297
+ ```
298
+ 16gb v100:
299
+ nodes=1, gpus=4 => 560 ms / iteration
300
+ nodes=1, gpus=1 => 628 ms / iteration
301
+ ```
302
+
303
+
304
+ ### Megatron-LM+Deepspeed: w/ deepspeed Pipeline
305
+
306
+ This is the version with Deepspeed's pipeline
307
+
308
+ https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-3D_parallelism/examples/ds_pretrain_gpt2_pipe.sh
309
+
310
+
311
+
312
+ ### Megatron-LM+Deepspeed: w/ deepspeed zero3/inf
313
+
314
+ This is the version with Deepspeed's Zero3/inf
315
+
316
+ https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-ZeRO3/examples/ds_pretrain_gpt2-zero3.sh
317
+
318
+
319
+
320
+ ### HF transformers distributed
321
+
322
+ Have to run once on a non-gpu instance which has network to retrieve the model and data files and get those cached.
323
+
324
+
325
+ ```
326
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
327
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
328
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
329
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
330
+ ```
331
+
332
+ ```
333
+ MODEL=$WORK/hf/megatron-lm/checkpoints/megatron-gpt2-345m
334
+ DATASET1=" \
335
+ --dataset_name wikitext \
336
+ --dataset_config_name wikitext-2-raw-v1"
337
+
338
+ DATASET=" \
339
+ --dataset_name openwebtext"
340
+ ```
341
+
342
+ first run on a networked instance to get the dataset, etc.
343
+ ```
344
+ PYTHONPATH="src" \
345
+ examples/pytorch/language-modeling/run_clm.py \
346
+ --model_name_or_path $MODEL \
347
+ $DATASET \
348
+ --output_dir output_dir \
349
+ --overwrite_output_dir \
350
+ --do_train \
351
+ --do_eval \
352
+ --max_train_samples 160 \
353
+ --max_eval_samples 160 \
354
+ --per_device_train_batch_size 4 \
355
+ --per_device_eval_batch_size 4 \
356
+ --num_train_epochs 1 \
357
+ --warmup_steps 8 \
358
+ --block_size 64 \
359
+ --report_to none
360
+ ```
361
+
362
+
363
+ 2nd run on gpu instance w/o network
364
+ ```
365
+ PYTHONPATH="src" \
366
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
367
+ python -m torch.distributed.launch --nproc_per_node=4 \
368
+ examples/pytorch/language-modeling/run_clm.py \
369
+ --model_name_or_path $MODEL \
370
+ $DATASET \
371
+ --output_dir output_dir \
372
+ --overwrite_output_dir \
373
+ --do_train \
374
+ --do_eval \
375
+ --max_train_samples 160 \
376
+ --max_eval_samples 160 \
377
+ --per_device_train_batch_size 4 \
378
+ --per_device_eval_batch_size 4 \
379
+ --num_train_epochs 1 \
380
+ --warmup_steps 8 \
381
+ --block_size 64 \
382
+ --fp16 \
383
+ --report_to none
384
+ ```
385
+
386
+
387
+
388
+ ### HF transformers + Deepspeed
389
+
390
+ probably should test zero2 and zero3
391
+
392
+ ```
393
+ PYTHONPATH="src" \
394
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
395
+ deepspeed --num_nodes 1 --num_gpus 4 \
396
+ examples/pytorch/language-modeling/run_clm.py \
397
+ --model_name_or_path $WORK/hf/megatron-lm/checkpoints/megatron-gpt2-345m \
398
+ --dataset_name wikitext \
399
+ --dataset_config_name wikitext-2-raw-v1 \
400
+ --output_dir output_dir \
401
+ --overwrite_output_dir \
402
+ --do_train \
403
+ --do_eval \
404
+ --max_train_samples 160 \
405
+ --max_eval_samples 160 \
406
+ --per_device_train_batch_size 4 \
407
+ --per_device_eval_batch_size 4 \
408
+ --num_train_epochs 1 \
409
+ --warmup_steps 8 \
410
+ --block_size 64 \
411
+ --fp16 \
412
+ --report_to none \
413
+ --deepspeed tests/deepspeed/ds_config_zero3.json
414
+
415
+ ```
jz/archs/gpt2.md ADDED
@@ -0,0 +1,863 @@
1
+ # GPT2 Comparisons
2
+
3
+ ## SLURM
4
+
5
+
6
+ 1 nodes / 4 gpus:
7
+
8
+ ```
9
+ srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:4 --hint=nomultithread --time=60 bash
10
+ ```
11
+
12
+ For multi-node versions of these scripts please see `$six_ALL_CCFRWORK/code/bigscience/jz/slurm`.
13
+
14
+
15
+ ## Data
16
+
17
+ Using OpenWebText https://huggingface.co/datasets/openwebtext
18
+
19
+ ```
20
+ from datasets import load_dataset
21
+ dataset = load_dataset("openwebtext", split='train')
22
+ dataset = load_dataset("stas/openwebtext-10k", split='train')
23
+ ```
24
+
25
+ Ready datasets:
26
+
27
+ 1. HF datasets use:
28
+
29
+ * `openwebtext` - 8M records `--dataset_name "openwebtext"`
30
+ * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"`
31
+
32
+ 2. Jsonlines (derived):
33
+
34
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl`
35
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl`
36
+
37
+ 3. Megatron-preprocessed datasets (derived):
38
+
39
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2_text_document.*`
40
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document.*`
41
+
42
+
43
+
44
+
45
+ #### How the above was done
46
+
47
+ To convert to jsonlines for Megatron
48
+
49
+ run on a beefy cpu instance (but firewalled), e.g.:
50
+ ```
51
+ srun --pty --nodes=1 --ntasks=1 --cpus-per-task=32 --gres=gpu:0 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
52
+ ```
53
+
54
+ Get vocabs:
55
+ ```
56
+ cd $six_ALL_CCFRWORK/datasets-custom/vocabs
57
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json
58
+ wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt
59
+ ```
60
+
61
+ small
62
+ ```
63
+ mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k
64
+ cd $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k
65
+ $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py -10k
66
+ ```
67
+
68
+ full (needs lots of RAM)
69
+ ```
70
+ mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext
71
+ cd $six_ALL_CCFRWORK/datasets-custom/openwebtext
72
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py
73
+ ```
74
+
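+ The `openwebtext-to-jsonl.py` helper isn't reproduced here, but the core of such a conversion is roughly the following (a minimal sketch, not the actual script):
+
+ ```
+ import json
+ from datasets import load_dataset
+
+ # swap in "stas/openwebtext-10k" for the small variant
+ dataset = load_dataset("openwebtext", split="train")
+ with open("openwebtext.jsonl", "w") as f:
+     for rec in dataset:
+         f.write(json.dumps({"text": rec["text"]}, ensure_ascii=False) + "\n")
+ ```
+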
75
+ To prep a 10k-sample for megatron
76
+ ```
77
+ cd $six_ALL_CCFRWORK/code/megatron-lm
78
+ python tools/preprocess_data.py \
79
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \
80
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2 \
81
+ --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-vocab.json \
82
+ --dataset-impl mmap \
83
+ --tokenizer-type GPT2BPETokenizer \
84
+ --merge-file $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-merges.txt \
85
+ --append-eod \
86
+ --workers 8
87
+ ```
88
+
89
+ To prep a full dataset for megatron
90
+ ```
91
+ cd $six_ALL_CCFRWORK/code/megatron-lm
92
+ python tools/preprocess_data.py \
93
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \
94
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2 \
95
+ --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-vocab.json \
96
+ --dataset-impl mmap \
97
+ --tokenizer-type GPT2BPETokenizer \
98
+ --merge-file $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-merges.txt \
99
+ --append-eod \
100
+ --workers 8
101
+ ```
102
+ as it should take a few hours to convert, use `slurm/jsonl-to-meg-gpt2.slurm` job to complete it
103
+ ```
104
+ sbatch jsonl-to-meg-gpt2.slurm
105
+ ```
106
+
107
+
108
+ ## Model
109
+
110
+
111
+ Ready pretrained models: GPT2 megatron_lm_345m
112
+
113
+ 1. HF
114
+
115
+ * `$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m`
116
+
117
+ 2. Megatron
118
+
119
+ * `$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release`
120
+
121
+
122
+ #### How the above was done
123
+
124
+ **Megatron model prep**
125
+
126
+
127
+ 1. Download nvidia checkpoint:
128
+ ```
129
+ wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
130
+ ```
131
+ 2.
132
+ ```
133
+ unzip megatron_lm_345m_v0.0.zip
134
+ ```
135
+
136
+
137
+ **HF transformers model prep**
138
+
139
+
140
+ prep the HF model - it's not available on the hub
141
+
142
+ 1. Download nvidia checkpoint:
143
+ ```
144
+ wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip
145
+ ```
146
+
147
+ 2. Convert:
148
+ ```
149
+ python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_lm_345m_v0.0.zip
150
+ ```
151
+
152
+ 3. Fetch missing files
153
+ ```
154
+ git clone https://huggingface.co/nvidia/megatron-gpt2-345m/
155
+ ```
156
+
157
+ 4. Move the converted files into the cloned model dir
158
+ ```
159
+ mv config.json pytorch_model.bin megatron-gpt2-345m/
160
+ ```
161
+
162
+ 5. megatron-gpt2-345m dir should now have all the files which can be passed as `--model_name_or_path megatron-gpt2-345m`
163
+
164
+
165
+ XXX: maybe we will use some small samples for testing - need .txt and .json for megatron-lm
166
+
167
+ ```
168
+ #--train_file {data_dir}/sample_text.txt \
169
+ #--validation_file {data_dir}/sample_text.txt \
170
+ ```
171
+
172
+
173
+ ## Training
174
+
175
+ ### Megatron-LM
176
+
177
+ running native https://github.com/NVIDIA/Megatron-LM
178
+
179
+ ```
180
+ cd $six_ALL_CCFRWORK/code
181
+ git clone https://github.com/NVIDIA/megatron-lm
182
+ cd megatron-lm
183
+ ```
184
+
185
+
186
+ ### Megatron: finetuning on a single GPU
187
+
188
+
189
+ Setup: 1 node / 1 gpu
190
+ ```
191
+ srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:1 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
192
+ ```
193
+
194
+ Launch training:
195
+
196
+ adding `--finetune` to work with existing checkpoint, remove to train from scratch
197
+ ```
198
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
199
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
200
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
201
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
202
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2
203
+
204
+ # --train-samples 200 \
205
+ # --lr-decay-samples 150 \
206
+ # --train-iters 100000 \
207
+ # --lr-decay-iters 320000 \
208
+ GPT_ARGS=" \
209
+ --num-layers 24 \
210
+ --hidden-size 1024 \
211
+ --num-attention-heads 16 \
212
+ --seq-length 1024 \
213
+ --max-position-embeddings 1024 \
214
+ --micro-batch-size 4 \
215
+ --global-batch-size 8 \
216
+ --lr 0.00015 \
217
+ --lr-decay-style cosine \
218
+ --min-lr 1.0e-5 \
219
+ --vocab-file $VOCAB_FILE \
220
+ --merge-file $MERGE_FILE \
221
+ --lr-warmup-fraction .01 \
222
+ --finetune \
223
+ --train-iters 1000 \
224
+ --lr-decay-iters 800 \
225
+ --fp16 \
226
+ --checkpoint-activations \
227
+ "
228
+
229
+ OUTPUT_ARGS=" \
230
+ --log-interval 10 \
231
+ --save-interval 500 \
232
+ --eval-interval 100 \
233
+ --eval-iters 10 \
234
+ "
235
+
236
+ python pretrain_gpt.py \
237
+ $GPT_ARGS \
238
+ $OUTPUT_ARGS \
239
+ --save $SAVE_CHECKPOINT_PATH \
240
+ --load $CHECKPOINT_PATH \
241
+ --data-path $DATA_PATH
242
+ ```
243
+
244
+ Speed: 0.637s / iteration
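+
+ To compare with trainers that report samples per second, divide the global batch size by the iteration time (a rough conversion sketch):
+ ```
+ python -c 'print(f"{8/0.637:.1f} samples/sec")'
+ ```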
245
+
246
+
247
+
248
+ ### Megatron: finetune distributed with MP
249
+
250
+ 2 types of parallelism supported:
251
+
252
+ - `--tensor-model-parallel-size`
253
+ - `--pipeline-model-parallel-size`
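+
+ The product of the two, times the data-parallel size, must equal the total number of GPUs, e.g. with the 4-gpu setup below and `--tensor-model-parallel-size 2 --pipeline-model-parallel-size 2` the data-parallel size is `4 / (2*2) = 1`.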
254
+
255
+ To get the average throughput, process the logfile:
256
+
257
+ ```
258
+ perl -nle 'use List::Util qw/sum/; m|elapsed time per iteration .ms.: ([\d\.]+)| && push @x, $1; END { print sum(@x)/+@x }' std-1611136.out
259
+ ```
260
+
261
+ Setup: 1 node / 4 gpus
262
+ ```
263
+ srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
264
+ ```
265
+
266
+ Launch training:
267
+ ```
268
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
269
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
270
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
271
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
272
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2
273
+
274
+ GPUS_PER_NODE=4
275
+ NNODES=1
276
+
277
+ # Change for multinode config
278
+ MASTER_ADDR=localhost
279
+ MASTER_PORT=6000
280
+ NODE_RANK=0
281
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
282
+
283
+ DISTRIBUTED_ARGS=" \
284
+ --nproc_per_node $GPUS_PER_NODE \
285
+ --nnodes $NNODES \
286
+ --node_rank $NODE_RANK \
287
+ --master_addr $MASTER_ADDR \
288
+ --master_port $MASTER_PORT \
289
+ "
290
+
291
+ NLAYERS=24
292
+ NHIDDEN=1024
293
+ BATCHSIZE=4
294
+
295
+ # --train-iters 100000 \
296
+ # --lr-decay-iters 320000 \
297
+ GPT_ARGS=" \
298
+ --num-layers $NLAYERS \
299
+ --hidden-size $NHIDDEN \
300
+ --num-attention-heads 16 \
301
+ --seq-length 1024 \
302
+ --max-position-embeddings 1024 \
303
+ --micro-batch-size 4 \
304
+ --global-batch-size 16 \
305
+ --lr 0.00015 \
306
+ --lr-decay-style cosine \
307
+ --min-lr 1.0e-5 \
308
+ --finetune \
309
+ --train-iters 1000 \
310
+ --lr-decay-iters 800 \
311
+ --lr-warmup-fraction .01 \
312
+ --weight-decay 1e-2 \
313
+ --clip-grad 1.0 \
314
+ --vocab-file $VOCAB_FILE \
315
+ --merge-file $MERGE_FILE \
316
+ --fp16 \
317
+ --checkpoint-activations \
318
+ "
319
+
320
+ OUTPUT_ARGS=" \
321
+ --log-interval 10 \
322
+ --save-interval 500 \
323
+ --eval-interval 100 \
324
+ --eval-iters 10 \
325
+ "
326
+
327
+ python -m torch.distributed.launch \
328
+ $DISTRIBUTED_ARGS \
329
+ pretrain_gpt.py \
330
+ --tensor-model-parallel-size 2 \
331
+ --pipeline-model-parallel-size 2 \
332
+ $GPT_ARGS \
333
+ $OUTPUT_ARGS \
334
+ --save $SAVE_CHECKPOINT_PATH \
335
+ --load $CHECKPOINT_PATH \
336
+ --data-path $DATA_PATH \
337
+ --data-impl mmap \
338
+ --split 949,50,1 \
339
+ --distributed-backend nccl
340
+ ```
341
+
342
+
343
+ Speed: 0.560s / iteration
344
+
345
+
346
+ ### Megatron: finetune distributed with MP - multi-node
347
+
348
+
349
+ Use `jay-z/slurm/meg-gpt2-multi-node.slurm`.
350
+
351
+ Speed: 0.560s / iteration
352
+
353
+
354
+ ### Megatron-LM+Deepspeed: w/ deepspeed Pipeline
355
+
356
+ This is the version with Deepspeed's pipeline
357
+
358
+ https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-3D_parallelism/examples/ds_pretrain_gpt2_pipe.sh
359
+
360
+
361
+
362
+ Setup: 1 node / 4 gpus
363
+ ```
364
+ srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
365
+ ```
366
+
367
+
368
+ ```
369
+
370
+ cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism
371
+
372
+
373
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
374
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
375
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
376
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
377
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2
378
+
379
+ GPUS_PER_NODE=4
380
+ NNODES=1
381
+
382
+ # Change for multinode config
383
+ MASTER_ADDR=localhost
384
+ MASTER_PORT=6000
385
+ NODE_RANK=0
386
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
387
+
388
+ export DLWS_NUM_WORKER=${NNODES}
389
+ export DLWS_NUM_GPU_PER_WORKER=${GPUS_PER_NODE}
390
+
391
+ config_json="./ds_config.json"
392
+
393
+
394
+ # Megatron Model Parallelism
395
+ mp_size=2
396
+ # DeepSpeed Pipeline parallelism
397
+ pp_size=2
398
+
399
+ NLAYERS=24
400
+ NHIDDEN=1024
401
+ BATCHSIZE=4
402
+ NUM_ATTN_HEADS=16
403
+
404
+
405
+ LOGDIR="tensorboard_data/${NLAYERS}l_${NHIDDEN}h_${NNODES}n_${GPUS_PER_NODE}g_${pp_size}pp_${mp_size}mp_${BATCHSIZE}b_ds4"
406
+
407
+ GAS=16
408
+
409
+ #ZeRO Configs
410
+ stage=0
411
+ reduce_scatter=true
412
+ contigious_gradients=true
413
+ rbs=50000000
414
+ agbs=5000000000
415
+
416
+ #Activation Checkpointing and Contiguous Memory
417
+ chkp_layers=1
418
+ PA=true
419
+ PA_CPU=false
420
+ CC=true
421
+ SYNCHRONIZE=true
422
+ PROFILE=false
423
+
424
+ GPT_ARGS=" \
425
+ --model-parallel-size ${mp_size} \
426
+ --pipe-parallel-size ${pp_size} \
427
+ --num-layers $NLAYERS \
428
+ --hidden-size $NHIDDEN \
429
+ --num-attention-heads $NUM_ATTN_HEADS \
430
+ --seq-length 1024 \
431
+ --max-position-embeddings 1024 \
432
+ --batch-size $BATCHSIZE \
433
+ --gas $GAS \
434
+ --train-iters 1000 \
435
+ --lr-decay-iters 800 \
436
+ --save $SAVE_CHECKPOINT_PATH \
437
+ --load $CHECKPOINT_PATH \
438
+ --data-path $DATA_PATH \
439
+ --vocab-file $VOCAB_FILE \
440
+ --merge-file $MERGE_FILE \
441
+ --data-impl mmap \
442
+ --split 949,50,1 \
443
+ --distributed-backend nccl \
444
+ --lr 1.5e-4 \
445
+ --lr-decay-style cosine \
446
+ --min-lr 1.0e-5 \
447
+ --weight-decay 1e-2 \
448
+ --clip-grad 1.0 \
449
+ --warmup 0.01 \
450
+ --fp16 \
451
+ "
452
+ #--tensorboard-dir ${LOGDIR}
453
+
454
+ OUTPUT_ARGS=" \
455
+ --log-interval 10 \
456
+ --save-interval 500 \
457
+ --eval-interval 100 \
458
+ --eval-iters 10 \
459
+ "
460
+
461
+ DEEPSPEED_ARGS=" \
462
+ --deepspeed \
463
+ --deepspeed_config ${config_json} \
464
+ --zero-stage ${stage} \
465
+ --zero-reduce-bucket-size ${rbs} \
466
+ --zero-allgather-bucket-size ${agbs} \
467
+ "
468
+
469
+ if [ "${contigious_gradients}" = "true" ]; then
470
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
471
+ --zero-contigious-gradients"
472
+ fi
473
+
474
+ if [ "${reduce_scatter}" = "true" ]; then
475
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
476
+ --zero-reduce-scatter"
477
+ fi
478
+
479
+ CHKP_ARGS=" \
480
+ --checkpoint-activations \
481
+ --checkpoint-num-layers ${chkp_layers}"
482
+
483
+ if [ "${PA}" = "true" ]; then
484
+ CHKP_ARGS="${CHKP_ARGS} \
485
+ --partition-activations"
486
+ fi
487
+
488
+ if [ "${PA_CPU}" = "true" ]; then
489
+ CHKP_ARGS="${CHKP_ARGS} \
490
+ --checkpoint-in-cpu"
491
+ fi
492
+
493
+ if [ "${SYNCHRONIZE}" = "true" ]; then
494
+ CHKP_ARGS="${CHKP_ARGS} \
495
+ --synchronize-each-layer"
496
+ fi
497
+
498
+ if [ "${CC}" = "true" ]; then
499
+ CHKP_ARGS="${CHKP_ARGS} \
500
+ --contigious-checkpointing"
501
+ fi
502
+
503
+ if [ "${PROFILE}" = "true" ]; then
504
+ CHKP_ARGS="${CHKP_ARGS} \
505
+ --profile-backward"
506
+ fi
507
+
508
+ full_options="${GPT_ARGS} ${OUTPUT_ARGS} ${DEEPSPEED_ARGS} ${CHKP_ARGS}"
509
+
510
+ run_cmd="deepspeed --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} pretrain_gpt2.py $@ ${full_options}"
511
+ echo ${run_cmd}
512
+ eval ${run_cmd}
513
+
514
+ ```
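+
+ The script expects a `./ds_config.json` which isn't shown above. A minimal sketch of what it could contain for this setup (with mp=2/pp=2 on 4 gpus the data-parallel size is 1, so `train_batch_size = 4 micro-batch x 16 gas x 1 dp = 64`; the fp16/zero values are assumptions):
+ ```
+ cat > ds_config.json <<'EOF'
+ {
+   "train_batch_size": 64,
+   "train_micro_batch_size_per_gpu": 4,
+   "gradient_accumulation_steps": 16,
+   "steps_per_print": 10,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": 0
+   },
+   "fp16": {
+     "enabled": true,
+     "loss_scale": 0,
+     "loss_scale_window": 1000,
+     "hysteresis": 2,
+     "min_loss_scale": 1
+   },
+   "wall_clock_breakdown": false
+ }
+ EOF
+ ```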
515
+
516
+
517
+ ### Megatron-LM+Deepspeed: w/ deepspeed zero3/inf
518
+
519
+ This is the version with Deepspeed's Zero3/inf
520
+
521
+ https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-ZeRO3/examples/ds_pretrain_gpt2-zero3.sh
522
+
523
+
524
+
525
+ Setup: 1 node / 4 gpus
526
+
527
+ ```
528
+ srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
529
+ ```
530
+
531
+
532
+ ```
533
+
534
+ cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3
535
+
536
+
537
+ # Change for multinode config
538
+ MP_SIZE=1
539
+
540
+ GPUS_PER_NODE=4
541
+ NNODES=1
542
+
543
+ DLTS_NUM_WORKER=$NNODES
544
+ DLTS_NUM_GPU_PER_WORKER=$GPUS_PER_NODE
545
+
546
+ NUM_WORKERS=${DLTS_NUM_WORKER}
547
+ NUM_GPUS_PER_WORKER=${DLTS_NUM_GPU_PER_WORKER}
548
+ HIDDEN_SIZE=1024
549
+ NUM_LAYERS=24
550
+ BATCHSIZE=4
551
+ NUM_ATTN_HEADS=16
552
+
553
+ CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release
554
+ VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json
555
+ MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt
556
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document
557
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2
558
+
559
+ config_json="./ds_zero_stage_3_config.json"
560
+
561
+ #ZeRO Configs
562
+ stage=3
563
+ reduce_scatter=true
564
+ contigious_gradients=true
565
+ rbs=50000000
566
+ agbs=5000000000
567
+
568
+ #Activation Checkpointing and Contiguous Memory
569
+ chkp_layers=1
570
+ PA=true
571
+ PA_CPU=true
572
+ CC=true
573
+ SYNCHRONIZE=true
574
+ PROFILE=false
575
+
576
+ # TiledLinear splits, 0 is disable
577
+ TILED_LINEAR="false"
578
+ TILE_DIM=1
579
+
580
+
581
+ # Megatron Model Parallelism
582
+ LOGDIR="tboard-zero3/stage${stage}-lazyscatter-${NUM_LAYERS}l_${HIDDEN_SIZE}h_${NUM_WORKERS}n_${NUM_GPUS_PER_WORKER}g_${MP_SIZE}mp_${BATCHSIZE}b"
583
+
584
+
585
+ GPT_ARGS=" \
586
+ --model-parallel-size ${MP_SIZE} \
587
+ --num-layers $NUM_LAYERS \
588
+ --hidden-size $HIDDEN_SIZE \
589
+ --num-attention-heads ${NUM_ATTN_HEADS} \
590
+ --seq-length 1024 \
591
+ --max-position-embeddings 1024 \
592
+ --batch-size $BATCHSIZE \
593
+ --train-iters 1000 \
594
+ --lr-decay-iters 800 \
595
+ --save $SAVE_CHECKPOINT_PATH \
596
+ --load $CHECKPOINT_PATH \
597
+ --data-path $DATA_PATH \
598
+ --vocab-file $VOCAB_FILE \
599
+ --merge-file $MERGE_FILE \
600
+ --data-impl mmap \
601
+ --split 949,50,1 \
602
+ --distributed-backend nccl \
603
+ --lr 1.5e-4 \
604
+ --lr-decay-style cosine \
605
+ --min-lr 1.0e-5 \
606
+ --weight-decay 1e-2 \
607
+ --clip-grad 1.0 \
608
+ --warmup 0.01 \
609
+ --fp16 \
610
+ --scattered-embeddings \
611
+ --split-transformers \
612
+ "
613
+ #--tensorboard-dir ${LOGDIR}
614
+
615
+ OUTPUT_ARGS=" \
616
+ --log-interval 10 \
617
+ --save-interval 500 \
618
+ --eval-interval 100 \
619
+ --eval-iters 10 \
620
+ "
621
+
622
+ DEEPSPEED_ARGS=" \
623
+ --deepspeed \
624
+ --deepspeed_config ${config_json} \
625
+ --zero-stage ${stage} \
626
+ --zero-reduce-bucket-size ${rbs} \
627
+ --zero-allgather-bucket-size ${agbs} \
628
+ "
629
+
630
+ if [ "${contigious_gradients}" = "true" ]; then
631
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
632
+ --zero-contigious-gradients"
633
+ fi
634
+
635
+ if [ "${reduce_scatter}" = "true" ]; then
636
+ DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \
637
+ --zero-reduce-scatter"
638
+ fi
639
+
640
+ CHKP_ARGS=" \
641
+ --checkpoint-activations \
642
+ --deepspeed-activation-checkpointing \
643
+ --checkpoint-num-layers ${chkp_layers}"
644
+
645
+ if [ "${PA}" = "true" ]; then
646
+ CHKP_ARGS="${CHKP_ARGS} --partition-activations"
647
+ fi
648
+
649
+ if [ "${PA_CPU}" = "true" ]; then
650
+ CHKP_ARGS="${CHKP_ARGS} \
651
+ --checkpoint-in-cpu"
652
+ fi
653
+
654
+ if [ "${SYNCHRONIZE}" = "true" ]; then
655
+ CHKP_ARGS="${CHKP_ARGS} \
656
+ --synchronize-each-layer"
657
+ fi
658
+
659
+ if [ "${CC}" = "true" ]; then
660
+ CHKP_ARGS="${CHKP_ARGS} \
661
+ --contigious-checkpointing"
662
+ fi
663
+
664
+ if [ "${PROFILE}" = "true" ]; then
665
+ CHKP_ARGS="${CHKP_ARGS} \
666
+ --profile-backward"
667
+ fi
668
+
669
+ if [ "${TILED_LINEAR}" = "true" ]; then
670
+ tile_opt="${tile_opt} \
671
+ --memory-centric-tiled-linear \
672
+ --tile-factor=${TILE_DIM}"
673
+ fi
674
+
675
+
676
+ full_options="${GPT_ARGS} ${OUTPUT_ARGS} ${DEEPSPEED_ARGS} ${CHKP_ARGS}"
677
+
678
+ run_cmd="deepspeed --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} pretrain_gpt2.py ${@:2} ${full_options}"
679
+ echo ${run_cmd}
680
+ eval ${run_cmd}
681
+
682
+ ```
683
+
684
+
685
+ ### HF transformers distributed
686
+
687
+ This has to be run once on a non-gpu instance which has network access, in order to retrieve the model and data files and get them cached.
688
+
689
+
690
+ ```
691
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
692
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
693
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
694
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
695
+ ```
696
+
697
+ ```
698
+ MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m
699
+ DATASET="stas/openwebtext-10k"
700
+ ```
701
+
702
+ ```
703
+ cd $six_ALL_CCFRWORK/code/transformers
704
+ #git clone https://github.com/huggingface/transformers
705
+ #cd transformers
706
+ ```
707
+
708
+ ```
709
+ source $six_ALL_CCFRWORK/start-prod
710
+
711
+ ```
712
+
713
+
714
+ First run on a networked instance to get the dataset et al. cached:
715
+ ```
716
+ PYTHONPATH="src" \
717
+ python examples/pytorch/language-modeling/run_clm.py \
718
+ --model_name_or_path $MODEL \
719
+ --dataset_name $DATASET \
720
+ --output_dir output_dir \
721
+ --overwrite_output_dir \
722
+ --do_train \
723
+ --do_eval \
724
+ --max_train_samples 160 \
725
+ --max_eval_samples 160 \
726
+ --per_device_train_batch_size 4 \
727
+ --per_device_eval_batch_size 4 \
728
+ --num_train_epochs 1 \
729
+ --warmup_steps 8 \
730
+ --block_size 64 \
731
+ --report_to none
732
+ ```
733
+
734
+
735
+ Then do the 2nd run on a gpu instance w/o network:
736
+ ```
737
+ PYTHONPATH="src" \
738
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
739
+ python -m torch.distributed.launch --nproc_per_node=4 \
740
+ examples/pytorch/language-modeling/run_clm.py \
741
+ --model_name_or_path $MODEL \
742
+ --dataset_name $DATASET \
743
+ --output_dir output_dir \
744
+ --overwrite_output_dir \
745
+ --do_train \
746
+ --do_eval \
747
+ --max_train_samples 1000 \
748
+ --max_eval_samples 200 \
749
+ --per_device_train_batch_size 4 \
750
+ --per_device_eval_batch_size 4 \
751
+ --num_train_epochs 1 \
752
+ --warmup_steps 8 \
753
+ --block_size 64 \
754
+ --fp16 \
755
+ --report_to none
756
+ ```
757
+
758
+ Speed:
759
+
760
+ train_samples_per_second = 5.043
761
+
762
+
763
+ let's do multi-node:
764
+
765
+ Setup: 2 nodes / 4 gpus
766
+ ```
767
+ srun --pty --nodes=2 --ntasks=8 --cpus-per-task=10 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
768
+ ```
769
+
770
+ Launch training:
771
+
772
+ ```
773
+ PYTHONPATH="src" \
774
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
775
+ python -m torch.distributed.launch --nnodes=2 --nproc_per_node=4 \
776
+ examples/pytorch/language-modeling/run_clm.py \
777
+ --model_name_or_path $MODEL \
778
+ --dataset_name $DATASET \
779
+ --output_dir output_dir \
780
+ --overwrite_output_dir \
781
+ --do_train \
782
+ --do_eval \
783
+ --max_train_samples 1000 \
784
+ --max_eval_samples 200 \
785
+ --per_device_train_batch_size 4 \
786
+ --per_device_eval_batch_size 4 \
787
+ --num_train_epochs 1 \
788
+ --warmup_steps 8 \
789
+ --block_size 64 \
790
+ --fp16 \
791
+ --report_to none
792
+ ```
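+
+ Note: for `torch.distributed.launch` to really use both nodes it has to be invoked once per node with the right `--node_rank` and `--master_addr`. A sketch of how that could be wired up from within the SLURM allocation (not the exact slurm script we use - the helper variable `CLM_ARGS` is made up here):
+ ```
+ # same run_clm.py arguments as in the command above
+ CLM_ARGS="--model_name_or_path $MODEL --dataset_name $DATASET --output_dir output_dir \
+     --overwrite_output_dir --do_train --do_eval --max_train_samples 1000 --max_eval_samples 200 \
+     --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 1 \
+     --warmup_steps 8 --block_size 64 --fp16 --report_to none"
+
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+
+ # one launcher per node, each picks up its node rank from SLURM
+ srun --ntasks-per-node=1 bash -c "PYTHONPATH=src HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
+     python -m torch.distributed.launch --nnodes=2 --nproc_per_node=4 \
+     --node_rank=\$SLURM_NODEID --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT \
+     examples/pytorch/language-modeling/run_clm.py $CLM_ARGS"
+ ```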
793
+
794
+ ### HF transformers + Deepspeed + zero2
795
+
796
+
797
+
798
+ ```
799
+ PYTHONPATH="src" \
800
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
801
+ deepspeed --num_nodes 1 --num_gpus 4 \
802
+ examples/pytorch/language-modeling/run_clm.py \
803
+ --model_name_or_path $MODEL \
804
+ --dataset_name $DATASET \
805
+ --output_dir output_dir \
806
+ --overwrite_output_dir \
807
+ --do_train \
808
+ --do_eval \
809
+ --max_train_samples 1000 \
810
+ --max_eval_samples 200 \
811
+ --per_device_train_batch_size 4 \
812
+ --per_device_eval_batch_size 4 \
813
+ --num_train_epochs 1 \
814
+ --warmup_steps 8 \
815
+ --block_size 64 \
816
+ --fp16 \
817
+ --report_to none \
818
+ --deepspeed tests/deepspeed/ds_config_zero2.json
819
+ ```
820
+
821
+ Speed:
822
+
823
+ train_samples_per_second = 2.14
824
+
825
+ ### HF transformers + Deepspeed + zero3
826
+
827
+ probably should also test w/o offload - see the sketch after the results below
828
+
829
+ ```
830
+ PYTHONPATH="src" \
831
+ HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
832
+ deepspeed --num_nodes 1 --num_gpus 4 \
833
+ examples/pytorch/language-modeling/run_clm.py \
834
+ --model_name_or_path $MODEL \
835
+ --dataset_name $DATASET \
836
+ --output_dir output_dir \
837
+ --overwrite_output_dir \
838
+ --do_train \
839
+ --do_eval \
840
+ --max_train_samples 1000 \
841
+ --max_eval_samples 200 \
842
+ --per_device_train_batch_size 4 \
843
+ --per_device_eval_batch_size 4 \
844
+ --num_train_epochs 1 \
845
+ --warmup_steps 8 \
846
+ --block_size 64 \
847
+ --fp16 \
848
+ --report_to none \
849
+ --deepspeed tests/deepspeed/ds_config_zero3.json
850
+ ```
851
+
852
+ Speed:
853
+
854
+ train_samples_per_second = 0.952
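+
+ To test w/o offload one could make a copy of the config with the offload devices disabled (a sketch, assuming the stock `ds_config_zero3.json` from the transformers tests with its `offload_optimizer`/`offload_param` sections):
+ ```
+ sed 's/"device": "cpu"/"device": "none"/' tests/deepspeed/ds_config_zero3.json > ds_config_zero3_no_offload.json
+ # then re-run the command above with:
+ #   --deepspeed ds_config_zero3_no_offload.json
+ ```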
855
+
856
+
857
+
858
+ ### HF transformers + Deepspeed + zero2 - multi-node
859
+
860
+
861
+ Use `jay-z/slurm/hf-ds-gpt2-multi-node.slurm`.
862
+
863
+ Speed: / iteration
jz/archs/t5.md ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # T5 Comparisons
2
+
3
+
4
+
5
+ ## Data
6
+
7
+ Using OpenWebText https://huggingface.co/datasets/openwebtext
8
+
9
+ ```
10
+ from datasets import load_dataset
11
+ dataset = load_dataset("openwebtext", split='train')
12
+ dataset = load_dataset("stas/openwebtext-10k", split='train')
13
+ ```
14
+
15
+
16
+ Megatron-LM t5 uses a subword-tokenized vocab from bert.
17
+
18
+ Ready datasets:
19
+
20
+ 1. HF datasets use:
21
+
22
+ * `openwebtext` - 8M records `--dataset_name "openwebtext"`
23
+ * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"`
24
+
25
+ 2. Jsonlines (derived):
26
+
27
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl`
28
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl`
29
+
30
+ 3. Megatron-preprocessed datasets (derived):
31
+
32
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-t5_text_document.*`
33
+ * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5_text_document.*`
34
+
35
+ 4. Vocabs (from HF):
36
+
37
+ * `$six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt`
38
+
39
+
40
+ #### How the above was done
41
+
42
+
43
+ For HF datasets and Jsonlines creation details, see [gpt2.md](./gpt2.md). We only need to create the differently pre-processed datasets here.
44
+
45
+ t5 uses the same tokenizer/indexer as bert - can use it for either t5 or bert meg-lm trainings
46
+
47
+ Get uncased bert vocab:
48
+ ```
49
+ cd $six_ALL_CCFRWORK/datasets-custom/vocabs
50
+ wget https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt -O bert-large-uncased-vocab.txt
51
+ ```
52
+
53
+
54
+ To prep a 10k-sample for megatron
55
+ ```
56
+ source $six_ALL_CCFRWORK/start-prod
57
+ cd $six_ALL_CCFRWORK/code/megatron-lm
58
+ python tools/preprocess_data.py \
59
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \
60
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5 \
61
+ --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt \
62
+ --dataset-impl mmap \
63
+ --tokenizer-type BertWordPieceLowerCase \
64
+ --split-sentences \
65
+ --workers 8
66
+ ```
67
+
68
+ To prep a full dataset for megatron
69
+ ```
70
+ source $six_ALL_CCFRWORK/start-prod
71
+ cd $six_ALL_CCFRWORK/code/megatron-lm
72
+ python tools/preprocess_data.py \
73
+ --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext.jsonl \
74
+ --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5 \
75
+ --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt \
76
+ --dataset-impl mmap \
77
+ --tokenizer-type BertWordPieceLowerCase \
78
+ --split-sentences \
79
+ --workers 8
80
+
81
+ ```
82
+ as it should take a few hours to convert, use `slurm/jsonl-to-meg-t5.slurm` job to complete it
83
+ ```
84
+ sbatch jsonl-to-meg-t5.slurm
85
+ ```
86
+
87
+
88
+
89
+
90
+ ## Training
91
+
92
+ ### Megatron-LM distributed with MP
93
+
94
+ Pipeline Parallelism is not yet support for T5 (in works)
95
+
96
+ Setup: 1 node / 4 gpus
97
+ ```
98
+ srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
99
+ ```
100
+
101
+ ```
102
+ cd $six_ALL_CCFRWORK/code/megatron-lm
103
+
104
+ GPUS_PER_NODE=4
105
+
106
+ # Change for multinode config
107
+ MASTER_ADDR=localhost
108
+ MASTER_PORT=6000
109
+ NNODES=1
110
+ NODE_RANK=0
111
+ WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
112
+
113
+ VOCAB_FILE=$six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt
114
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5_text_sentence
115
+ SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/t5
116
+
117
+ DISTRIBUTED_ARGS=" \
118
+ --nproc_per_node $GPUS_PER_NODE \
119
+ --nnodes $NNODES \
120
+ --node_rank $NODE_RANK \
121
+ --master_addr $MASTER_ADDR \
122
+ --master_port $MASTER_PORT \
123
+ "
124
+
125
+ # from t5 training:
126
+ # --global-batch-size 2048 \
127
+ GPT_ARGS=" \
128
+ --num-layers 12 \
129
+ --hidden-size 768 \
130
+ --num-attention-heads 12 \
131
+ --kv-channels 64 \
132
+ --ffn-hidden-size 3072 \
133
+ --encoder-seq-length 512 \
134
+ --decoder-seq-length 128 \
135
+ --micro-batch-size 16 \
136
+ --max-position-embeddings 512 \
137
+ --train-iters 1000000 \
138
+ --lr-decay-iters 1000000 \
139
+ --lr 0.0001 \
140
+ --min-lr 0.00001 \
141
+ --lr-decay-style linear \
142
+ --lr-warmup-fraction .01 \
143
+ --weight-decay 1e-2 \
144
+ --clip-grad 1.0 \
145
+ --fp16 \
146
+ "
147
+
148
+ OUTPUT_ARGS=" \
149
+ --log-interval 10 \
150
+ --save-interval 500 \
151
+ --eval-interval 100 \
152
+ --eval-iters 10 \
153
+ "
154
+
155
+ python -m torch.distributed.launch \
156
+ $DISTRIBUTED_ARGS \
157
+ pretrain_t5.py \
158
+ --tensor-model-parallel-size 2 \
159
+ $GPT_ARGS \
160
+ $OUTPUT_ARGS \
161
+ --save $SAVE_CHECKPOINT_PATH \
162
+ --load $SAVE_CHECKPOINT_PATH \
163
+ --data-path $DATA_PATH \
164
+ --data-impl mmap \
165
+ --vocab-file $VOCAB_FILE \
166
+ --vocab-extra-ids 100 \
167
+ --split 949,50,1 \
168
+ --distributed-backend nccl
169
+
170
+
171
+
172
+ ```
jz/envs/README.md ADDED
@@ -0,0 +1,662 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Work Environment Info
2
+
3
+
4
+ ## Users and Accounts
5
+
6
+ **Accounts:**
7
+
8
+ - `six` - the BigScience allocation - our main allocation
9
+ - `ajs` - original dynamic access allocations - use it if you can as we still have resources there - but it will give low priority on scheduling - hence use primarily for jobs that can be bumped down in the queue for a few days.
10
+
11
+ To switch to `six` as the main project:
12
+ ```
13
+ idrproj -d six
14
+ ```
15
+ and logout/login.
16
+
17
+ Check which projects one belongs to: `idrproj`
18
+
19
+ **Users:**
20
+
21
+ Use `idracct six` to see which username belongs to which real person.
22
+
23
+
24
+ ## First time setup
25
+
26
+ Make sure that your `~/.bashrc` is executed on login: if you don't already have a `~/.bash_profile`, create one with these contents:
27
+
28
+ ```
29
+ # if running bash
30
+ if [ -n "$BASH_VERSION" ]; then
31
+ # include .bashrc if it exists
32
+ if [ -f "$HOME/.bashrc" ]; then
33
+ . "$HOME/.bashrc"
34
+ fi
35
+ fi
36
+ ```
37
+
38
+ It of course could have other contents, but make sure the above is there.
39
+
40
+ Now add this to your `~/.bashrc` and run `bash` for the changes to take effect.
41
+
42
+ ```
43
+ # ~/.bashrc: executed by bash(1) for non-login shells.
44
+ [[ $- != *i* ]] && return
45
+
46
+ # Log in with correct group - relevant to all users as we have multiple groups we belong to
47
+ if [[ $(id -gn) != "six" ]]
48
+ then
49
+ newgrp six
50
+ exit
51
+ fi
52
+
53
+ # start production environment:
54
+ # this loads modules, conda and sets all the relevant env vars
55
+ alias start-prod="source $six_ALL_CCFRWORK/start-prod"
56
+
57
+ # our production conda env is here:
58
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
59
+
60
+ # SLURM / Account specific settings
61
+
62
+ # share dirs/files with the group
63
+ umask 0007
64
+
65
+ # specific caches
66
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
67
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
68
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
69
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
70
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
71
+
72
+ # shortcut
73
+ export PROD=$six_ALL_CCFRWORK
74
+
75
+ # handy shortcuts
76
+ alias myjobs="squeue -u `whoami`"
77
+
78
+ # our shared conda base
79
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
80
+ ```
81
+
82
+ note: wrt `newgrp six` - if you want to use it elsewhere and not `~/.bashrc` you may use this `newgrp - six` syntax instead, but don't use it in `~/.bashrc` or it will break many things.
83
+
84
+ Also since most of our work is at `$six_ALL_CCFRWORK` you may want to add symlinks:
85
+ ```
86
+ ln -s $six_ALL_CCFRWORK ~/prod
87
+ ln -s $six_ALL_CCFRSCRATCH ~/prod-scratch
88
+ ln -s $six_ALL_CCFRSTORE ~/prod-store
89
+ ln -s /gpfsssd/worksf/projects/rech/six/commun ~/prod-worksf
90
+ ```
91
+ and then you can quickly `cd` there w/o needing to type too much, and with the shortcut `$PROD` env var you can now do it one of 2 ways:
92
+ ```
93
+ cd ~/prod
94
+ cd $PROD
95
+ ```
96
+
97
+ Some users prefer to use the env vars, so let's try to not expect the symlinks to be there for everybody.
98
+
99
+ If you intend to use `gsutil`, add the following lines:
100
+
101
+ ```
102
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc'; fi
103
+ if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc'; fi
104
+ ```
105
+
106
+ Without them, `gsutil` on Jean Zay fails with a hard-to-debug `TypeError: argument should be integer or bytes-like object, not 'str'` error.
107
+
108
+ ## Production environment
109
+
110
+ In order to use the production environment, run:
111
+
112
+ ```
113
+ start-prod
114
+ ```
115
+ which will:
116
+ - setup env vars
117
+ - configure nice git-prompt with lots of useful info built in
118
+ - load the right `module`s
119
+ - activate our custom production conda environment which has everything in it
120
+
121
+ so basically use it when running production scripts.
122
+
123
+ The alias should have been set in `~/.bashrc` as instructed above.
124
+
125
+ Note: the fancy [bash-git-prompt](https://github.com/magicmonty/bash-git-prompt) tells you which conda env you are in, and then which branch you are in and a ton of useful git info, and it was extended to tell you whether you're in the login instance (prefix `0-1`) or whether you're on a GPU instance where it then shows something like `4-40` - the 2 numbers stand for `${SLURM_NNODES}-${SLURM_CPUS_PER_TASK}` - so you know what `srun` configuration you're logged into (or the login shell where you get no nodes, with 0 gpus and 1 cpu hence `0-1`).
126
+
127
+ The production conda env `hf-prod` is already set up, so you don't need to do anything, but here are some details on how it was done should you want to know.
128
+
129
+ Our production shared conda env is at `$six_ALL_CCFRWORK/conda`, you can make it visible by either doing this one:
130
+ ```
131
+ conda config --append envs_dirs $six_ALL_CCFRWORK/conda
132
+ ```
133
+ which will add this path to `~/.condarc` or use:
134
+ ```
135
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
136
+ ```
137
+ in your `~/.bashrc`.
138
+
139
+ You can use it for anything but please don't install anything into it (unless coordinating with others), as we want this to be a reliable environment for all to share.
140
+
141
+ Additionally you will most likely want to do:
142
+
143
+ ```
144
+ mv ~/.conda ~/.conda-old
145
+ ln -s $six_ALL_CCFRWORK/.conda ~/.conda
146
+ ```
147
+
148
+ because otherwise conda will try to use your HOME dir which is only 3GB-large. You can then nuke `~/.conda-old` or move it elsewhere.
149
+
150
+
151
+
152
+
153
+ ## Creating production conda env
154
+
155
+ **Do not run any of the instructions in this section**. Please co-ordinate any changes to this environment on #bigscience-jz on slack since many users use it for their experiments. If you want to create your custom conda env, please read the following sections instead.
156
+
157
+ If the production environment got broken, here is how it can be re-built.
158
+
159
+ This should be done on a login instance, since we need the network.
160
+
161
+ ```
162
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
163
+
164
+ conda create -y -n hf-prod python=3.8
165
+ conda activate hf-prod
166
+
167
+ # pt-1.10.1 / cuda 11.3
168
+ conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
169
+ pip install deepspeed
170
+
171
+ cd $six_ALL_CCFRWORK/code/transformers
172
+ pip install -e .[dev]
173
+
174
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
175
+ pip install -r requirements.txt
176
+
177
+ cd $six_ALL_CCFRWORK/code/deepspeed
178
+ ./build.sh
179
+
180
+ # to build custom tokenizers make sure that if run on JZ your `~/.cargo/config.toml` contains the following:
181
+ [net]
182
+ git-fetch-with-cli = true
183
+
184
+ # if needed first:
185
+ # git clone https://github.com/huggingface/tokenizers $six_ALL_CCFRWORK/code/tokenizers
186
+ cd $six_ALL_CCFRWORK/code/tokenizers
187
+ git checkout bigscience_fork
188
+ module load rust
189
+ pip install setuptools_rust
190
+ pip install -e bindings/python
191
+ ```
192
+
193
+ while we are going to override some of these with our custom installs, we first install these normally to get all the dependencies right.
194
+
195
+ Then finally to build apex you need a non-login instance since it is very demanding on resources and such build on the login instance will get killed:
196
+
197
+ ```
198
+ srun --pty -A six@cpu --qos=qos_cpu-dev --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
199
+ cd $six_ALL_CCFRWORK/code/apex
200
+ ./build.sh
201
+ ```
202
+ Note: if using a no-gpu instance to build `apex` it will warn that it can't detect any GPUs but will cross-compile for several archs. But you could also tell it to build for V100 and A100 explicitly by simply adding the desired archs:
203
+
204
+ ```
205
+ TORCH_CUDA_ARCH_LIST="7.0 8.0" pip install ...
206
+ ```
207
+
208
+ ## Personal environment
209
+
210
+ You can use these dirs, which are your private spaces:
211
+
212
+ - `$WORK`
213
+ - `$SCRATCH`
214
+ - `$STORE`
215
+
216
+ So you probably want to mimic the production env.
217
+
218
+ We also agreed to use
219
+
220
+ ```
221
+ ln -s $WORK ~/user
222
+ ln -s $SCRATCH ~/user-scratch
223
+ ln -s $STORE ~/user-store
224
+ ```
225
+ and then you can quickly `cd` there w/o needing to type too much:
226
+ ```
227
+ cd ~/user
228
+ ```
229
+
230
+ Since we are going to use `~/user/...` in scripts, it now should be possible to re-use our scripts w/o modifying them. To change the script to use the production setup, it'll be just `s/user/prod/`.
231
+
232
+
233
+
234
+ ## Custom private conda env
235
+
236
+ First follow the instructions for [Production environment](#production-environment) which should have already set up most things to make it very easy to add your custom conda env.
237
+
238
+ If wanting to work with variations of packages, create your own conda env, e.g. env `stas`:
239
+
240
+ ```
241
+ export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
242
+
243
+ conda create -y -n stas python=3.8
244
+ conda activate stas
245
+ conda install pytorch torchvision cudatoolkit=11.3 -c pytorch-lts -c nvidia
246
+ pip install deepspeed
247
+
248
+ cd ~/user/code/transformers
249
+ pip install -e .[dev]
250
+
251
+ cd ~/user/code/Megatron-Deepspeed
252
+ pip install -r requirements.txt
253
+
254
+ cd ~/user/code/deepspeed
255
+ ./build.sh
256
+
257
+ cd ~/user/code/apex
258
+ ./build.sh
259
+ ```
260
+
261
+ See a special note on how to build apex in [Creating production conda env](#creating-production-conda-env).
262
+
263
+
264
+ ## Login node
265
+
266
+ If the login node is heavily used by someone, one can switch to another node
267
+
268
+ `host jean-zay.idris.fr` will tell you which login nodes are currently in the alias
269
+
270
+ if the DNS round robin doesn't send you to another login node, you can target a specific login node (`jean-zayN.idris.fr` , with N from 1 to 5, though some might not be available so using the alias is always better)
271
+
272
+
273
+ ## Dealing with running out of disc space
274
+
275
+ Find out where disc space is used up:
276
+ ```
277
+ du -ahd1 $six_ALL_CCFRWORK | sort -rh
278
+ du -ahd1 $six_ALL_CCFRSTORE | sort -rh
279
+ ```
280
+
281
+ Find out where inodes are used up:
282
+ ```
283
+ du -ahd1 --inodes $six_ALL_CCFRWORK | sort -rh
284
+ du -ahd1 --inodes $six_ALL_CCFRSTORE | sort -rh
285
+ ```
286
+
287
+ Some busy git clones can be pruned of unused files with: `git gc`, e.g. to prune a dir with multiple-clones as sub-dirs:
288
+
289
+ ```
290
+ cd $six_ALL_CCFRWORK/code
291
+ du -hs .
292
+ du -hs --inodes .
293
+ find . -mindepth 1 -maxdepth 1 -type d -exec bash -c "cd '{}' && git gc" +
294
+ du -hs .
295
+ du -hs --inodes .
296
+ ```
297
+
298
+ ## Finding things
299
+
300
+ Our WORK is indexed by mlocate, after adding this alias:
301
+ ```
302
+ alias locate="/usr/bin/locate -d $ALL_CCFRWORK/lib/mlocate/work.db:$ALL_CCFRWORK/lib/mlocate/worksf.db"
303
+ ```
304
+ You can now do:
305
+ ```
306
+ locate -i megatron
307
+ ```
308
+ (remove `-i` if you want case-sensitive search)
309
+
310
+ the index is being updated by `$six_ALL_CCFRWORK/bin/mlocate-update` in a crontab job in `$six_ALL_CCFRWORK/cron/cron.daily/mlocate-update.slurm`.
311
+
312
+ For more details on the emulated crontab job see: [crontab](../crontab/README.md).
313
+
314
+
315
+ ## Syncing the perms
316
+
317
+ We use `umask 0007` in `~/.bashrc` so that the shared dirs get `g+rwx` perms and we can all operate on them, but it doesn't always help. When a tarball is extracted it will often retain the original perms on the files, so if those didn't have `w` for the group it'll remain as such. Therefore occasionally, and especially after installing a new dataset, please run the commands below.
318
+
319
+ We also need `g+s` on dirs, so that new dirs and files created in the sub-dir get created with the same group as the parent dir (e.g. important when `scp`-ing from outside, but also in many other cases).
320
+
321
+ Also note that `chgrp` removes the sgid bit, so it has to be restored immediately - do not run `chgrp` alone!
322
+
323
+ For some reason group perms go wrong at times. We need all files to be `g+wrxs` (dirs), `g+rw` (files), `six` (group name), so here is how to fix things back to normal:
324
+
325
+ ```
326
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
327
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
328
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
329
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
330
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
331
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
332
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
333
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
334
+ ```
335
+
336
+ If somehow we lost the sgid bit on some dirs, to restore just those:
337
+ ```
338
+ find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
339
+ find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
340
+ find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
341
+ find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)"
342
+ ```
343
+ albeit, the set of commands above should have already done the right thing, as they include `g+rwxs`.
344
+
345
+
346
+
347
+ ## Activate production script
348
+
349
+ This can be safely added at the beginning of slurm scripts:
350
+
351
+ ```
352
+ source $six_ALL_CCFRWORK/start-prod
353
+ ```
354
+
355
+ And if you made the symlink from your `$HOME`, interactively it's easier to remember to type:
356
+
357
+ ```
358
+ source ~/prod/start-prod
359
+ ```
360
+
361
+
362
+
363
+ ## Building things from source
364
+
365
+
366
+ The building should happen on a beefy instance - or things just get killed
367
+
368
+ Normally use the free `-p compil` partition:
369
+
370
+ ```
371
+ srun --pty -A six@cpu -p compil --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
372
+ ```
373
+
374
+ if it doesn't yield an allocation, use the `idrsrv` ones by adding `-c 10` (10 cpu cores)
375
+ ```
376
+ srun --pty -A six@cpu -p compil -c 10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
377
+ ```
378
+
379
+ but if it has to be really fast, use a dedicated instance with pre-allocated cpu cores:
380
+ ```
381
+ srun --pty -A six@cpu --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
382
+ ```
383
+
384
+ same with 1 gpu if the build env requires one (neither `apex` nor `deepspeed` require one):
385
+ ```
386
+ srun --pty -A six@gpu --nodes=1 --ntasks=1 --cpus-per-task=10 --gres=gpu:1 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
387
+ ```
388
+
389
+ `/tmp` is tiny on gpu instances, and apex at least needs a big `/tmp` folder, so point `TMPDIR` at a larger location:
390
+
391
+
392
+ Quick instructions (detailed listing follows):
393
+
394
+ ```
395
+ export TMPDIR=$six_ALL_CCFRWORK/tmp
396
+ mkdir -p $TMPDIR
397
+
398
+ cd $six_ALL_CCFRWORK/code/deepspeed
399
+ ./build.sh
400
+
401
+ cd $six_ALL_CCFRWORK/code/apex
402
+ ./build.sh
403
+ ```
404
+
405
+
406
+ ### deepspeed
407
+
408
+
409
+ To pre-build deepspeed (as compared to have it built via JIT at runtime):
410
+
411
+ ```
412
+ export TMPDIR=$six_ALL_CCFRWORK/tmp
413
+ mkdir -p $TMPDIR
414
+ cd $six_ALL_CCFRWORK/code/deepspeed
415
+ ./build.sh
416
+ ```
417
+
418
+ what's in the build:
419
+ ```
420
+ $ cat build.sh
421
+ #!/bin/bash
422
+
423
+ rm -rf build
424
+
425
+ time TORCH_CUDA_ARCH_LIST="7.0 8.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
426
+ ```
427
+
428
+ ### apex
429
+
430
+ To build apex (needed by megatron-lm):
431
+
432
+ build:
433
+ ```
434
+ cd $six_ALL_CCFRWORK/code/apex
435
+ ./build.sh
436
+ ```
437
+
438
+ what's in the build:
439
+ ```
440
+ $ cat build.sh
441
+ #!/bin/bash
442
+
443
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
444
+ ```
445
+
446
+ Note that since we are using pt/cuda-11.1 and JZ has cuda-11.2, apex won't build unless we skip the version check (the check is not really necessary - things work just fine), so should you reset the clone and remove the local patch, you can restore it with this diff: https://github.com/NVIDIA/apex/issues/988#issuecomment-726343453
447
+
448
+
449
+
450
+ ## Aliases
451
+
452
+ ```
453
+ # autogenerate the hostfile for deepspeed
454
+ # 1. deals with: SLURM_JOB_NODELIST in either of 2 formats:
455
+ # r10i1n8,r10i2n0
456
+ # r10i1n[7-8]
457
+ # 2. and relies on SLURM_STEP_GPUS=0,1,2... to get how many gpu slots per node
458
+ #
459
+ # usage:
460
+ # makehostfile > hostfile
461
+ function makehostfile() {
462
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
463
+ $slots=4 if $slots==0; # workaround 4 gpu machines
464
+ while ($ENV{"SLURM_JOB_NODELIST"} =~ m/(\w+)(?:\[([\d-,]+)\])?,?/msg) {
465
+ $b=$1; $s=$2||q[""]; $s=~s/-/../g;
466
+ print map { "$b$_ slots=$slots\n" } eval $s }'
467
+ }
468
+ ```
469
+
470
+ ```
471
+ # auto-extract the master node's address from: SLURM_JOB_NODELIST which may contain r10i1n3,r10i1n[5-8],r10i1n7
472
+ # so here we want r10i1n3
473
+ function get_master_address() {
474
+ perl -le '$_=$ENV{"SLURM_JOB_NODELIST"}; s/,.*//; s/-.*//; s/\[//; print'
475
+ }
476
+ ```
477
+
478
+ Better solutions for the same as above:
479
+
480
+ ```
481
+ # autogenerate the hostfile for deepspeed
482
+ # 1. deals with: SLURM_JOB_NODELIST in either of 2 formats:
483
+ # r10i1n8,r10i2n0
484
+ # r10i1n[7-8]
485
+ # 2. and relies on SLURM_STEP_GPUS=0,1,2... to get how many gpu slots per node
486
+ #
487
+ # usage:
488
+ # makehostfile > hostfile
489
+ function makehostfile() {
490
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
491
+ $slots=8 if $slots==0; # workaround 8 gpu machines
492
+ @nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}];
493
+ print map { "$b$_ slots=$slots\n" } @nodes'
494
+ }
495
+ ```
496
+
497
+ ```
498
+ # auto-extract the master node's address from: SLURM_JOB_NODELIST which may contain r10i1n3,r10i1n[5-8],r10i1n7
499
+ # so here we want r10i1n3
500
+ function get_master_address() {
501
+ echo $(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
502
+ }
503
+ ```
504
+
505
+
506
+ ## Troubleshooting
507
+
508
+ ### pip install
509
+
510
+ If it's trying to install into your local `~/.local` folder it's because `pip` is in that `$PATH` before
511
+ `$six_ALL_CCFRWORK/conda/hf-prod/bin/` - push the last one to be first - or best don't install any python things locally - use conda for that. Check with `which pip` - it should be under `$six_ALL_CCFRWORK/conda/hf-prod/bin/pip`.
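+
+ A quick way to check and fix the ordering (a sketch):
+ ```
+ which pip python
+ # if they don't point into the shared conda env, push it to the front of PATH:
+ export PATH=$six_ALL_CCFRWORK/conda/hf-prod/bin:$PATH
+ ```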
512
+
513
+
514
+
515
+ ### Running `py-spy` diagnostics on multiple nodes at once
516
+
517
+ To do some monitoring of multiple nodes running an `srun` job:
518
+
519
+ (This is just an example of starting a job; most of the time it'll be running already.)
520
+ ```
521
+ cd ~/prod/code/tr8b-104B/bigscience/train/tr11-200B-ml/
522
+
523
+ salloc --partition=gpu_p5 --constraint=a100 --nodes=48 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
524
+
525
+ bash 200B-n40-bf16-mono.slurm
526
+ ```
527
+
528
+ Then in another shell:
529
+
530
+ ```
531
+ squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"
532
+ srun --overlap --jobid=1729333 --gres=gpu:0 --nodes=48 --tasks-per-node=1 --output=trace-%N.out sh -c 'source $six_ALL_CCFRWORK/start-prod; pgrep -P $(pgrep -o python) | xargs -I {} py-spy dump --pid {}' || echo "failed"
533
+ ```
534
+
535
+ This will create a log file per node, e.g. `trace-jean-zay-iam52.out` which will contain the output of the command on that node.
536
+
537
+ Notes:
538
+ - adjust `--jobid` to the desired job (output of `squeue`). If using a job array and the job id looks like `1728318_2` first translate the virtual JobId into an actual JobID:
539
+ ```
540
+ scontrol show job 1728318_2 | perl -nle 'm/JobId=(\d+)/ && print $1'
541
+ ```
542
+ - adjust `--nodes=48` to match the same setting as the original `salloc` or `srun` command
543
+ - `--overlap` allows a new job to run on nodes allocated by another job.
544
+
545
+ `py-spy`-specific notes:
546
+
547
+ - run the command via `sh`. It may be possible to run `bash`, but I run into `py-spy: Permission denied` - it shouldn't need `sudo` but something in my bash dotfile triggers this problem, even though it doesn't happen if I run bash interactively.
548
+ - `pgrep -P $(pgrep -o python)` will give the immediate children of the launcher - 8 processes per node on A100 - which is what we want most of the time.
549
+ - if you want all children and grandchildren (e.g. dataloader helpers) - can be hundreds of processes! then use just `pgrep python`
550
+
551
+
552
+
553
+ #### using ds_ssh
554
+
555
+ It's a bit tricky and doesn't work for `py-spy` (see notes in the section above - it seems to have to do with `bash`'s dotfiles).
556
+
557
+
558
+ ```
559
+ salloc --partition=gpu_p5 --constraint=a100 --nodes=2 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
560
+ ```
561
+
562
+ ```
563
+ bash 20B-n2-fp16.slurm
564
+ ```
565
+
566
+ ```
567
+ function makehostfile() {
568
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
569
+ $slots=8 if $slots==0; # workaround 8 gpu machines
570
+ @nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}];
571
+ print map { "$b$_ slots=$slots\n" } @nodes'
572
+ }
573
+ makehostfile > hostfile
574
+ ```
575
+
576
+ ```
577
+ ds_ssh -f hostfile "source ~/.pdshrc; nvidia-smi"
578
+ ```
579
+
580
+ the tricky part is to get the remote env loaded. I have a mostly OK hack, but it doesn't work for `py-spy` - something is wrong in the env.
581
+
582
+ So the special env-loading file is:
583
+ ```
584
+ $ cat ~/.pdshrc
585
+
586
+ source /etc/profile.d/z_modules.sh;
587
+
588
+ #source ~/.bashrc
589
+
590
+ module purge
591
+ #module load pytorch-gpu/py3/1.8.1
592
+ module load nvtop git git-lfs github-cli mc
593
+
594
+ # specific caches
595
+
596
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
597
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
598
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
599
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
600
+ export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom
601
+
602
+ ### CONDA ###
603
+
604
+ # >>> conda initialize >>>
605
+ # !! Contents within this block are managed by 'conda init' !!
606
+ __conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
607
+ if [ $? -eq 0 ]; then
608
+ eval "$__conda_setup"
609
+ else
610
+ if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then
611
+ . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh"
612
+ else
613
+ export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH"
614
+ fi
615
+ fi
616
+ unset __conda_setup
617
+ # <<< conda initialize <<<
618
+
619
+ conda activate base
620
+ conda activate /gpfswork/rech/six/commun/conda/py38-pt111
621
+ ```
622
+
623
+ `ds_ssh` uses pdsh behind the scenes.
624
+
625
+ Note that `py-spy` works just fine when actually ssh'ed to the compute node:
626
+
627
+ ```
628
+ ps aux | grep python | egrep -v '(srun|grep)' | grep `whoami` | awk '{print $2}' | xargs -I {} py-spy dump --pid {}
629
+ ```
630
+
631
+ #### using pdsh
632
+
633
+ To access just one running node it's simpler to just use `pdsh` directly.
634
+
635
+ ```
636
+ pdsh -w jean-zay-iam01 "source ~/.pdshrc; nvidia-smi"
637
+ ```
638
+
639
+
640
+ ## Older info
641
+
642
+ Probably of no use any longer, but still here in case it is needed (might move to another file).
643
+
644
+ ## Local resources
645
+
646
+ For your own personal explorations you can either create your own `conda` env or use your local python, which has a few issues, but it allows you to continue using JZ's pytorch `module`.
647
+
648
+ `pip install` installs into `$HOME/.local/lib/python3.7/site-packages`, however system-wide packages may take precedence. For example to do `develop` install of transformers use this workaround:
649
+ ```
650
+ git clone https://github.com/huggingface/transformers
651
+ cd transformers
652
+ pip install --user --no-use-pep517 -e .
653
+ ```
654
+
655
+ You may still have to override `PYTHONPATH=$WORK/hf/transformers-master/src` (edit it to wherever your clone is) if you want to emulate a `develop` build. Test:
656
+ ```
657
+ export PYTHONPATH=$WORK/hf/transformers-master/src
658
+ python -c "import transformers; print(transformers.__version__)"
659
+ # 4.6.0.dev0
660
+ ```
661
+
662
+ See [`envs`](./envs) for instructions on how to build conda and packages
jz/envs/apex/build.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log
4
+
jz/envs/deepspeed/build.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ rm -rf build
4
+
5
+ time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_FUSED_LAMB=1 DS_BUILD_TRANSFORMER=1 DS_BUILD_STOCHASTIC_TRANSFORMER=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
6
+
7
+ # time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_OPS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log
math/README.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Handy Math
2
+
3
+
4
+ ## Estimate model training time
5
+
6
+ in days:
7
+ ```
8
+ (X billion tokens)*(8* M billion parameters)/(N_GPUs * Achieved_TFLOPs * 1e12*60*60*24)
9
+ ```
10
+
11
+ `Achieved_TFLOPs` is measured by running experiments that tune up the setup for the best throughput performance.
12
+
13
+ For example, a 13 billion parameter model trained for 300 billion tokens on 256 GPUs at 45 TFLOPs would take: `(300 billion)*(8*13 billion)/(256*45*1 trillion *60*60*24) = ~31 days`
14
+
15
+ ```
16
+ $ python -c 'Btokens=300; Bmodel=13; n_gpus=256; Tflops=45; \
17
+ print(f"{Btokens*1e9*8*Bmodel*1e9/(n_gpus*Tflops*1e12*60*60*24):0.2f} days")'
18
+ 31.35 days
19
+ ```
20
+
21
+ Notes:
22
+
23
+ - the factor of 8 can be broken into `(2 x (1+2+1))` where the factor of 2 is for multiply+add, the two 1s are for the forward propagation and the recomputation in the backward, and the 2 is for the backward propagation.
24
+
25
+ contributed by Samyam Rajbhandari
26
+
27
+
28
+ ## Calculate TFLOPs
29
+
30
+ The following is an estimation formula which slightly under-reports the real TFLOPs:
31
+
32
+ TFLOPs: `model_size_in_B * 4 * 2 * seqlen * global_batch_size / (time_in_sec_per_iteration * total_gpus * 1e3)`
33
+
34
+ The factor of 4 is when used with activation check-pointing, otherwise it will be 3, but for 100B+ model, activation check-pointing will always be on.
35
+
36
+ So the `3*2` is often called "model FLOPs" and `4*2` - "hardware FLOPs".
37
+
38
+ ```
39
+ perl -le '$ng=64; $ms=52; $gbs=1024; $sp=127; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)'
40
+ ```
41
+ (ng = total gpus, ms = model size in B, gbs = global batch size, sp = seconds per iteration)
42
+
43
+ same with bash env vars and broken down GBS into mbs*dp*gas (gas=pp_chunks):
44
+ ```
45
+ echo "($MSIZE*4*2*SEQLEN*$MICRO_BATCH_SIZE*$DP_SIZE*$GAS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l
46
+ ```
47
+
48
+ - Automatically process slurm/megatron log files and average the throughput (prints 'fail' when the training failed w/o producing a single iteration stat):
49
+ ```
50
+ find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { print "$ARGV " . ($c ? int($x/$c/1000) : "fail")}' {} \; | sort | grep -v fail
51
+ ```
52
+
53
+ The exact formula is in Equation 3 of Section 5.1 of the [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/abs/2104.04473) paper. You can see the code [here](https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/251).
54
+
55
+ For Inference only it'd be:
56
+
57
+ `24Bsh^2 + 4Bs^2h` floating point operations per layer
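+
+ where `B` is the batch size, `s` the sequence length and `h` the hidden size. For example, plugging in BLOOM-like dimensions as a sketch:
+ ```
+ python -c 'B=1; s=2048; h=14336; print(f"{(24*B*s*h**2 + 4*B*s**2*h)/1e12:.1f} TFLOPs per layer")'
+ ```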
58
+
59
+
60
+ ## Model sizing
61
+
62
+ ### Params as a function of the network size hyperparams
63
+
64
+ ```
65
+ NHIDDEN=4096; NLAYERS=36; SEQ_LEN=512; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
66
+ ```
67
+
68
+ For full details see [Calculate model size](../experiments/gpt2-utils.md).
69
+
70
+ The BLOOM architecture doesn't use the normal positional embedding, so the formula is slightly different - it no longer depends on SEQLEN - and we have added an additional layer norm after the word embedding, so `s/s*h + 2*h/4*h` in the formula above:
71
+ ```
72
+ NHIDDEN=14336; NLAYERS=70; NHEADS=112; VOCAB_SIZE=250000; python -c "h=$NHIDDEN; l=$NLAYERS; n=$NHEADS; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + 4*h) / 10**9 :.0f}B, hidden/layers ratio: {int(h/l)}, hidden/heads ratio: {int(h/n)}')"
73
+ ```
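+
+ Running this one-liner for the 176B config above should print `Model size: 176B, hidden/layers ratio: 204, hidden/heads ratio: 128`.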
74
+
75
+ ### Width-depth tradeoff
76
+
77
+ From [The Depth-to-Width Interplay in Self-Attention](https://arxiv.org/abs/2006.12467):
78
+
79
+ ```
80
+ NLAYERS=70; python -c "import math; l=$NLAYERS; a = 5.039; b = 5.55e-2; print(f'Optimal n_params: {12 * l * math.exp(2*a) * math.exp(2*b*l) / 10**9 :.0f}B')"
81
+ ```
82
+ This seems to be less important as the number of parameters scales up, but is useful to ground the discussion.
83
+
84
+
85
+ ## Estimate total training time
86
+
87
+ Training Time Estimates. Given these throughputs, we can also estimate the total amount of time needed for end-to-end training on 𝑇 tokens. Training requires 𝐼 = 𝑇 /(𝐵 · 𝑠) iterations. Using the value of 𝐹 from equation (3) and empirical end-to-end throughputs from Table 1 (denoted by 𝑋), we can estimate total training time. We note that for the configurations in Table 1, we have 6ℎ ≫ 𝑠, 16𝑙ℎ ≫ (𝑉 + 𝑠), and 12𝑙ℎ ≫ 𝑉 . Combining these observations with equations (2) and (3), we arrive at:
88
+
89
+ End-to-end training time (seconds) ≈ 8𝑇𝑃/𝑛𝑋
90
+
91
+ Let us consider the GPT-3 model with 𝑃 =175 billion parameters as an example. This model was trained on 𝑇 = 300 billion tokens. On 𝑛 = 1024 A100 GPUs using batch size 1536, we achieve 𝑋 = 140 teraFLOP/s per GPU. As a result, the time required to train this model is 34 days. For the 1 trillion parameter model, we assume that 450 billion tokens are needed for end-to-end training. With 3072 A100 GPUs, we can achieve a per-GPU throughput of 163 teraFLOP/s, and end-to-end training time of 84 days. We believe these training times (using a reasonable number of GPUs) are practical.
92
+
93
+
94
+ This math and discussion is quoted from [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/abs/2104.04473).
95
+
96
+ Let's explain the formula: `8𝑇𝑃/𝑛𝑋`
97
+
98
+ In the formula:
99
+
100
+ - T: number of tokens used for training in Billions
101
+ - P: number of parameters divided by 1,000 (this absorbs the leftover `1e3` from expressing T in billions and X in TFLOPs), e.g. `200_000_000` for a 200B-parameter model
102
+ - n: number of GPUs
103
+ - X: throughput per GPU in TFLOPs
104
+ - The result is in seconds, so divide by 3600*24 to get days
105
+
106
+ Example:
107
+
108
+ - T = 300B
109
+ - P = 200_000_000 (i.e. 200B parameters)
110
+ - X = 150 TFLOPs (more or less the best one can get on an efficient setup on A100)
111
+ - n = 350
112
+
113
+ gives us:
114
+
115
+ ```
116
+ $ python -c 'print(f"{8*300*200_000_000/(350*150)/(3600*24):0.2f}", "days")'
117
+ 105.82 days
118
+ ```
119
+
120
+ ## Finding the checkpoint that has the amount of tokens you want
121
+
122
+ Trying to find the step at which you reached the number of tokens you want for every model size
123
+ n_samples = n_tokens / 2048
124
+ The average batch size during rampup is rampup_batch_size = 0.5 * (global_batch_size + start_batch_size)
125
+ The number of steps is rampup_samples / rampup_batch_size + (n_samples - rampup_samples) / global_batch_size = rampup_samples / 0.5 / (global_batch_size + start_batch_size) + (n_tokens / 2048 - rampup_samples) / global_batch_size. Those will all change for each model. For example for [tr11f](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm) at 150B tokens we have:
126
+
127
+ > - $GLOBAL_BATCH_SIZE = 512
128
+ > - --rampup-batch-size 192 32 9_765_625 which gives:
129
+ > - start_batch_size = 192
130
+ > - rampup_samples = 9,765,625
131
+ >
132
+ > so n_steps = 9,765,625 / 0.5 / (512 + 192) + (150,000,000,000 / 2048 - 9,765,625) / 512 = 151721
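+
+ The same calculation as a small python sketch (our own helper; the tr11f numbers above are used as the example):
+
+ ```
+ def step_for_tokens(n_tokens, global_batch_size, start_batch_size, rampup_samples, seqlen=2048):
+     n_samples = n_tokens / seqlen
+     rampup_steps = rampup_samples / (0.5 * (global_batch_size + start_batch_size))
+     return round(rampup_steps + (n_samples - rampup_samples) / global_batch_size)
+
+ print(step_for_tokens(150_000_000_000, 512, 192, 9_765_625))  # 151721
+ ```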
train/README.md ADDED
@@ -0,0 +1,38 @@
1
+ ## Training scripts
2
+
3
+ This folder gathers training scripts for the different arch/scaling and engineering experiments. The naming convention is `tr<number>-<short-description>`. The current baseline that architecture and scaling experiments compare to is [tr3d](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm). In order to launch a new experiment, you should probably start from the [arch-and-scaling template](https://github.com/bigscience-workshop/bigscience/blob/master/train/arch-and-scaling-template.slurm).
4
+
5
+ Some tips:
6
+ - [TFlops optimization](https://github.com/bigscience-workshop/bigscience/blob/master/train/tflops_optimization.md): How to make sure that given a set of hardware you optimize the speed at which you train.
7
+ - [Instrumentation](https://github.com/bigscience-workshop/bigscience/blob/master/tools/README.md): How to sync with the hub
8
+
9
+ ## Stored checkpoints
10
+
11
+ Location of the checkpoints of the trained models plus logs and anything else of importance - e.g. eval harness results:
12
+
13
+ - tr1-13B: `gs://bigscience-backups/tr1-13B/`
14
+
15
+ - tr3m-1B3-emb-norm-pile: `$six_ALL_CCFRSTORE/checkpoints/tr3m-1B3-emb-norm-pile`
16
+
17
+ - tr4-1B3-rotary: `$six_ALL_CCFRSTORE/checkpoints/`
18
+ - tr4b-350M-rotary: `$six_ALL_CCFRSTORE/checkpoints/`
19
+ - tr4c-1B3-rotary-oscar: `$six_ALL_CCFRSTORE/checkpoints/tr4c-1B3-rotary-oscar`
20
+
21
+ - tr6-1B3-prefix-lm: `$six_ALL_CCFRSTORE/checkpoints/tr6-1B3-prefix-lm`
22
+ - tr6-1B3-prefix-lm-unbiased-loss: `$six_ALL_CCFRSTORE/checkpoints/tr6-1B3-prefix-lm-unbiased-loss`
23
+ - tr6b-350M-prefix-lm: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm`
24
+ - tr6b-350M-prefix-lm-PP2: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm-PP2`
25
+ - tr6b-350M-prefix-lm-unbiased-loss: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm-unbiased-loss`
26
+ - tr6c-350M-prefix-lm-reset-attention-mask: `$six_ALL_CCFRSTORE/checkpoints/tr6c-350M-prefix-lm-reset-attention-mask`
27
+ - tr6c-350M-prefix-lm-reset-attention-mask.backup: `$six_ALL_CCFRSTORE/checkpoints/tr6c-350M-prefix-lm-reset-attention-mask.backup`
28
+ - tr6d-350M-prefix-lm-pile: `$six_ALL_CCFRSTORE/checkpoints/tr6d-350M-prefix-lm-pile`
29
+ - tr6e-1B3-pile: `$six_ALL_CCFRSTORE/checkpoints/tr6e-1B3-pile`
30
+ - tr6f-1B3-oscar-no-loss-on-targets-only: `$six_ALL_CCFRSTORE/checkpoints/tr6f-1B3-oscar-no-loss-on-targets-only`
31
+ - tr6g-1B3-oscar-loss-reweighting: `$six_ALL_CCFRSTORE/checkpoints/tr6g-1B3-oscar-loss-reweighting`
32
+
33
+ - tr7a-1B3-alibi (not a real alibi pos embedding experiment - the alibi matrix were not used in this experiment): `$six_ALL_CCFRSTORE/checkpoints/tr7a-1B3-alibi`
34
+ - tr7b-350-alibi (not a real alibi pos embedding experiment - the alibi matrix were not used in this experiment): `$six_ALL_CCFRSTORE/checkpoints/tr7b-350M-alibi`
35
+ - tr7d-1B3-alibi: `$six_ALL_CCFRSTORE/checkpoints/tr7d-1B3-alibi`
36
+
37
+ - tr9b-350M-swiglu: `$six_ALL_CCFRSTORE/checkpoints/tr9b-350M-swiglu`
38
+ - tr9c-1B3-swiglu-pile: `$six_ALL_CCFRSTORE/checkpoints/tr9b-1B3-swiglu-pile`
train/memory.md ADDED
@@ -0,0 +1,7 @@
1
+ # Memory Utilization
2
+
3
+ ## Activation Partitioning
4
+
5
+ > Activation Partitioning is a memory optimization in ZeRO that can reduce the memory consumed by activations during model parallel training (MP). In MP certain activations may be required by all MP processes, resulting in a replication of activations across MP GPUs. Activation Partitioning stores these activations in a partitioned state once they are used for computation in the forward propagation. These activations are allgathered right before they are needed again during the backward propagation. By storing activations in a partitioned state, ZeRO in DeepSpeed can reduce the activation memory footprint proportional to the MP degree.
6
+
7
+ To activate add `--partition-activations`
train/sanity-checks.md ADDED
@@ -0,0 +1,59 @@
1
+ # Sanity Checks
2
+
3
+ When configuring the slurm script you must ensure the following constraints are strictly met:
4
+
5
+
6
+ 1.
7
+
8
+ players:
9
+ - NHIDDEN
10
+ - NHEADS
11
+
12
+ ```
13
+ NHIDDEN % NHEADS == 0
14
+ ```
15
+
16
+ 2.
17
+
18
+ players:
19
+ - GLOBAL_BATCH_SIZE
20
+ - MICRO_BATCH_SIZE
21
+ - DP_SIZE
22
+
23
+ ```
24
+ GLOBAL_BATCH_SIZE % (MICRO_BATCH_SIZE * DP_SIZE) == 0
25
+ ```
26
+
27
+ 3.
28
+
29
+ players:
30
+ - NLAYERS
31
+ - PP_SIZE
32
+
33
+ ```
34
+ NLAYERS % PP_SIZE == 0
35
+ ```
36
+
37
+ 4.
38
+
39
+
40
+
41
+
42
+ 5. Curriculum Learning Constraints
43
+
44
+ - min_difficulty % 8 = 0 (to enable Tensor Core acceleration)
45
+
46
+ - json ds config can't have numbers with '_' in them - invalid json - careful with substitutions.
47
+
48
+
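+ A minimal sketch that could be dropped into the slurm script to assert constraints 1-3 before launching (assuming the usual env var names used in these scripts):
+
+ ```
+ python -c "
+ import os
+ env = lambda k: int(os.environ[k])
+ assert env('NHIDDEN') % env('NHEADS') == 0
+ assert env('GLOBAL_BATCH_SIZE') % (env('MICRO_BATCH_SIZE') * env('DP_SIZE')) == 0
+ assert env('NLAYERS') % env('PP_SIZE') == 0
+ print('sanity checks passed')
+ "
+ ```
+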
49
+ ## Restarting from existing checkpoint constraints
50
+
51
+ XXX: quite a few of these - need to start collecting them all
52
+
53
+ - can't change TP-size (But ok to change PP)
54
+
55
+ - can't change max-lr or will get:
56
+
57
+ ```
58
+ AnnealingLR: class input value 1e-05 and checkpointvalue 3e-05 for learning rate do not match
59
+ ```
train/tr11-176B-ml/backup-schedule.md ADDED
@@ -0,0 +1,142 @@
1
+ # Backup Schedule
2
+
3
+ First start an internet instance that won't get killed:
4
+
5
+ ```
6
+ srun --pty -A six@cpu -p compil --hint=nomultithread --time=20:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod
7
+ ```
8
+
9
+ then back up:
10
+
11
+ ## logs and eval-results (tiny)
12
+
13
+ ```
14
+ gsutil rsync -x ".git" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/tr11-176B-ml-logs gs://bigscience-backups/tr11-176B-ml/tr11-176B-ml-logs
15
+ gsutil rsync -x ".git" -r $six_ALL_CCFRSTORE/checkpoints/tr11-176B-ml/eval-results gs://bigscience-backups/tr11-176B-ml/tr11-176B-ml-eval-results
16
+ ```
17
+
18
+
19
+ ## full checkpoint (2.3TB)
20
+
21
+ 12 checkpoints: total 27TB
22
+
23
+ ```
24
+ # done
25
+
26
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step3000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step3000
27
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step10000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step10000
28
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step20000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step20000
29
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step30000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step30000
30
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step40000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step40000
31
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step50000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step50000
32
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step60000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step60000
33
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step70000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step70000
34
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step80000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step80000
35
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step90000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step90000
36
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step95000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step95000
37
+
38
+ # in-progress
39
+
40
+
41
+
42
+ # todo:
43
+
44
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step100000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step100000
45
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step110000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step110000
46
+ gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step120000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step120000
47
+
48
+ ```
49
+
50
+
51
+
52
+ ## weights only checkpoints (0.33TB)
53
+
54
+
55
+ 40 checkpoints: total 13TB - autogenerate the schedule:
56
+ ```
57
+ perl -le 'print qq[gsutil rsync -x "bf16.*" -r \$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step$_ gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step$_] for map { $_*3000 } 1..40'
58
+ ```
59
+
60
+ ```
61
+ # done
62
+
63
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step3000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step3000
64
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step6000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step6000
65
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step9000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step9000
66
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step12000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step12000
67
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step15000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step15000
68
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step18000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step18000
69
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step21000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step21000
70
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step24000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step24000
71
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step27000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step27000
72
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step33000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step33000
73
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step36000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step36000
74
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step39000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step39000
75
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step42000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step42000
76
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step45000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step45000
77
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step48000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step48000
78
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step51000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step51000
79
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step54000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step54000
80
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step57000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step57000
81
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step63000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step63000
82
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step66000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step66000
83
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step69000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step69000
84
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step72000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step72000
85
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step75000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step75000
86
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step78000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step78000
87
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step81000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step81000
88
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step84000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step84000
89
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step87000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step87000
90
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step93000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step93000
91
+
92
+ # in-progress
93
+
94
+
95
+
96
+ # todo:
97
+
98
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step96000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step96000
99
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step99000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step99000
100
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step102000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step102000
101
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step105000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step105000
102
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step108000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step108000
103
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step111000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step111000
104
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step114000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step114000
105
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step117000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step117000
106
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step120000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step120000
107
+
108
+ ```
109
+
110
+
111
+
112
+ ## spikes
113
+
114
+ weights only
115
+
116
+ ```
117
+ # done
118
+
119
+
120
+
121
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSTORE/checkpoints/tr11-176B-ml/checkpoints/spikes/global_step31200 gs://bigscience-backups/tr11-176B-ml/checkpoints-spikes-weights/global_step31200
122
+ gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSTORE/checkpoints/tr11-176B-ml/checkpoints/spikes/global_step31259 gs://bigscience-backups/tr11-176B-ml/checkpoints-spikes-weights/global_step31259
123
+
124
+ # in-progress
125
+
126
+
127
+
128
+ # todo:
129
+
130
+ ```
131
+
132
+
133
+ ## Tarring the checkpoints in STORE
134
+
135
+ Since we don't have too many inodes in STORE we ought to tar the checkpoints
136
+
137
+ ```
138
+ cd /gpfsdsstore/projects/rech/six/commun/checkpoints/tr11-176B-ml/checkpoints
139
+ cd 1
140
+ find * -maxdepth 0 -type d -exec tar cvf {}.tar {} \;
141
+
142
+ ```
train/tr11-176B-ml/chronicles-prequel.md ADDED
@@ -0,0 +1,1394 @@
1
+ # Prequel
2
+
3
+ Trials and tribulations prior to the start of training.
4
+
5
+ For the trials and tribulation during the training see: [chronicles](chronicles.md).
6
+
7
+ # A100 experiments
8
+
9
+ 200B
10
+
11
+ torch.optim.Adam:
12
+
13
+ 16 nodes:
14
+ - 1st node: 61GB
15
+ - all nodes: 47GB
16
+ - performance: XXX
17
+
18
+ apex.optimizers.FusedAdam
19
+
20
+ 16 nodes:
21
+ - 1st node: 51GB
22
+ - all nodes: 44GB
23
+ - performance: XXX
24
+
25
+
26
+
27
+ ## Size
28
+
29
+
30
+ Here are some existing models around the same size with NLAYERS / NHIDDEN and their ratio:
31
+
32
+
33
+ | origin | size | layers | hidden | ratio |
34
+ | ------ | --- | -----: | -----: | ----: |
35
+ | bs | 104B | 64 | 11600 | 180 |
36
+ | meg-lm | 145B | 80 | 12288 | 154 |
37
+ | openai | 175B | 96 | 12288 | 128 |
38
+ | meg-lm | 310B | 96 | 16384 | 170 |
39
+ | msft | 530B | 105 | 20480 | 195 |
40
+ | | | | | |
41
+
42
+
43
+
44
+
45
+ Possible ideas:
46
+
47
+ - 205B: 112 / 12288 (ratio: 109) narrow
48
+ - 206B: 96 / 13312 (ratio: 139) closer to typical 150-200 ratio
49
+
50
+ Formula to get the model size, using roughly a 150k vocab - needs updating:
51
+ ```
52
+ NHIDDEN=12288; NLAYERS=112; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
53
+ ```
54
+
55
+ ### 104B topology / memory usage
56
+
57
+ Looking at the current 104B topology to try to estimate the 200B model, though many things are different.
58
+
59
+ ```
60
+ NLAYERS=64
61
+ NHIDDEN=11600
62
+ NHEADS=80
63
+ SEQ_LEN=2048
64
+ VOCAB_SIZE=50257
65
+ ```
66
+
67
+ 32 GB gpus.
68
+
69
+ TP=4, PP=32
70
+
71
+ breakdown:
72
+
73
+ 104B:
74
+
75
+ - embedding size: `v*h`: `50257*11600` = 582_981_200 / 4 (TP=4) => 145_745_300 params per gpu for embedding
76
+ - one layer size: `12*h**2 + 13*h`: 1_614_870_800 / 4 (TP=4) => 403_717_700 params per gpu per layer
77
+
78
+ 64 layers over PP=32 => 2 layers per gpu
79
+
80
+ Total params per gpu:
81
+ - gpu w/ emb: `2*403_717_700 + 145_745_300` = 953_180_700 params * 18 bytes = 17_157_252_600 bytes (17GB)
82
+ - gpu w/o emb: `2*403_717_700` = 807_435_400 params * 18 bytes = 14_533_837_200 (15GB)
83
+
84
+ plus activations memory
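+
+ The same arithmetic as a quick python sketch (our own helper; 18 bytes/param covers the weights, grads and fp32 optimizer states as used throughout this doc):
+
+ ```
+ h, v, tp, layers_per_gpu = 11600, 50257, 4, 2
+ emb_per_gpu = v * h / tp
+ layer_per_gpu = (12 * h**2 + 13 * h) / tp
+ for name, params in [("w/ emb", layers_per_gpu * layer_per_gpu + emb_per_gpu),
+                      ("w/o emb", layers_per_gpu * layer_per_gpu)]:
+     print(f"{name}: {params:,.0f} params -> {params * 18 / 1e9:.1f}GB")
+ ```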
85
+
86
+ Checking the actual GPU allocations (nvidia-smi) - also need to take into account the cuda kernels (1271MiB)
87
+
88
+ - 22GB (w/ embed) (4GB activations memory)
89
+ - 18GB (w/o embed) (2GB activations memory)
90
+
91
+ ## Hardware
92
+
93
+ 384 A100s 80GB / 8 gpus per node
94
+
95
+ We can plan to use 384 gpus out of 416 as 4 nodes of 8 gpus need to remain reserved for when some nodes happen to be down.
96
+
97
+ Initially we will have only 144 gpus and then around mid-Feb we should have the rest.
98
+
99
+ ## Possible config:
100
+
101
+ So a possible config is
102
+
103
+ - a single replica needs to fit 96 gpus and then we can do DP=4 to a full 384 gpus
104
+
105
+ - extrapolating from the current 104B setup we can have: TP=4/PP=24 @ 80GB + 150K vocab size (which is different from the 50k vocab in 104B - 3x bigger embed matrix plus bigger hidden size).
106
+
107
+ - most likely the embedding layer now will need to be partitioned together with the transformer blocks to do a good balancing of resources. e.g. in the current 1.3B ml setup, the 1st and last gpus use all of the DRAM, but the rest of the gpus use only 1/2 of the DRAM - and TFLOPs are ~21 which is very underutilized.
108
+
109
+
110
+ ### Possible topologies for 200B
111
+
112
+ 206B:
113
+
114
+ ```
115
+ NLAYERS=96
116
+ NHIDDEN=13312
117
+ NHEADS=XXX
118
+ SEQ_LEN=2048
119
+ VOCAB_SIZE=150_000
120
+ ```
121
+
122
+ Overall we know that DP is the fastest, then PP, then TP - but for PP to be efficient we need a big bs.
123
+
124
+ The following math is trying various topologies to fit into 80GB gpus
125
+
126
+
127
+ * TP=4, PP=24
128
+
129
+ - embedding size: `v*h: 150257*13312` = `2_000_221_184 / 4` (TP=4) => 500_055_296 params per gpu for embedding
130
+ - one layer size: `12*h**2 + 13*h`: `2_126_685_184 / 4` (TP=4) => 531_671_296 params per gpu per layer
131
+
132
+ In other words 2B params per layer w/o TP, or 38GB (`2.12*18`) per layer.
133
+
134
+ So here we definitely need to balance the embedding layer with the transformer layers as they are of about the same size, so overall there are Layers+2 blocks to balance - and the constraint won't be `Layers % PP = 0` but `(Layers+2) % PP = 0`
135
+
136
+ So probably should do 94 layers?
137
+
138
+ 94+2 layers over PP=24 => 4 layers per gpu
139
+
140
+ Total params per gpu (considering emb layer on par with transformers block):
141
+ - `4*531_671_296` = `2_126_685_184 params * 18` = 38_280_333_312 bytes
142
+ plus activations memory
143
+
144
+ 40GB A100 takes 1573MiB for cuda kernels (probably about the same for 80GB? may be a bit larger)
145
+ `python -c "import torch; import time; torch.ones(1).cuda(); time.sleep(30)"` + check `nvidia-smi` output.
146
+
147
+
148
+
149
+ * TP=1, PP=96
150
+
151
+ ~2B params per layer w/o TP, or 38GB (`2.12*18`) per layer.
152
+
153
+ but DS breaks if there isn't at least one transformer block per gpu :(
154
+ otherwise could do a very efficient:
155
+
156
+ ```
157
+ 1 | 2 | 3 ... | 95 | 96
158
+ emb | transf | transf ....| transf | emb
159
+ ```
160
+
161
+ So in this scenario no TP is needed, which should make the assembly much faster. But it will require DS to fix their side. Or perhaps we could somehow hack in a dummy layer which looks like a transformer block? e.g. if it's the first or last layer it'd be an identity forward.
162
+
163
+ Also the pipeline will be super long here, which to make efficient will require a huge global batch size.
164
+
165
+
166
+
167
+ * with TP=2, PP=48
168
+
169
+ 1_063_342_592 params per layer, 19_140_166_656 bytes (19GB) per layer
170
+
171
+ perhaps could squeeze 3 layers per gpu - but of course each gpu will be less efficient since it will have to do 3 pipe stages.
172
+
173
+ * Other considerations
174
+
175
+ Of course, we could make the model wider and shallower so for example with TP=1 perhaps we could fit a bit more width and use less layers. e.g. 530B model was NLAYERS=105, NHIDDEN=20480 - so it's much wider.
176
+
177
+
178
+
179
+ ## Reconsiderations
180
+
181
+ After discussing the above plans with the NVIDIA and DeepSpeed experts it appears that:
182
+
183
+ 1. on A100 and especially with much larger models TP>1 is much more beneficial and typically NVIDIA almost always uses TP=gpus_per_node for large models.
184
+
185
+ 2. A very deep PP (96) would be very difficult to keep efficient unless the batch size per replica is huge.
186
+
187
+ 3. Too many layers isn't great either:
188
+
189
+ Jared Casper writes:
190
+
191
+ > Regarding hidden size vs transformer layer (width vs depth), some feedback I got is that there isn't really a magic formula/process. We increase depth with the width but not as drastically as a typical vision model scaling. So you shouldn't go too crazy with depth. The width is somewhat constrained by sizes good for the GPU, so it seems a strategy is to push out the width but keep it nice numbers, then fill out with depth. You'll notice even at 530B params we only went to 105 layers.
192
+
193
+
194
+ ## Existing models
195
+
196
+ Let's first analyse a few existing models and see how they fit 80GB A100 8-gpu nodes.
197
+
198
+
199
+ * 145B meg-lm
200
+
201
+ ```
202
+ NHIDDEN=12288; NLAYERS=80; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
203
+ Model size: 146B, ratio=153
204
+ ```
205
+
206
+ ```
207
+ NHIDDEN=12288; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}MB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}MB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}MB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}MB')"
208
+ emb size: 617.56M/11116.04MB, per gpu 77.19M/1389.51MB
209
+ blk size: 1812.10M/32617.78MB, per gpu 226.51M/4077.22MB
210
+ ```
211
+
212
+ MP=64: TP=8, PP=8: one replica 64 gpus
213
+
214
+ so 80/8=10 PP stages per gpu: `10*4` =40GB of weights/optim states/grads per gpu
215
+
216
+
217
+ * 310B meg-lm
218
+
219
+ ```
220
+ NHIDDEN=16384; NLAYERS=96; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
221
+ Model size: 310B, ratio=170
222
+ ```
223
+
224
+ MP=128: TP=8, PP=16: one replica 128 gpus
225
+
226
+ ```
227
+ NHIDDEN=16384; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}MB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}MB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}MB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}MB')"
228
+ emb size: 823.41M/14821.39MB, per gpu 102.93M/1852.67MB
229
+ blk size: 3221.44M/57985.89MB, per gpu 402.68M/7248.24MB
230
+ ```
231
+
232
+ so `96/16=6` PP stages per gpu: `6*7.3` ~44GB of weights/optim states/grads per gpu
233
+
234
+ * 530B msft
235
+
236
+
237
+ ```
238
+ NHIDDEN=20480; NLAYERS=105; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
239
+ Model size: 530B, ratio=195
240
+ ```
241
+
242
+
243
+ MP=280: TP=8, PP=35: one replica 280 gpus
244
+
245
+ (actually don't know the vocab size here, but it doesn't matter much)
246
+
247
+ ```
248
+ NHIDDEN=20480; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}MB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}MB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}MB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}MB')"
249
+ emb size: 1029.26M/18526.74MB, per gpu 128.66M/2315.84MB
250
+ blk size: 5033.43M/90601.76MB, per gpu 629.18M/11325.22MB
251
+ ```
252
+
253
+ so 105/35=3 PP stages per gpu: `3*11.3` = ~33.9GB of weights/optim states/grads per gpu
254
+
255
+
256
+ To summarize, we can see that these setups load each gpu to roughly half its memory with weights / optim states / grads (params `*18` bytes)
257
+
258
+ ## Possible 200B models
259
+
260
+
261
+ So first let's try to come up with wider and shallower model to fit 200B, or wide if shallow doesn't work out too well topology/efficiency-wise
262
+
263
+
264
+ ### 199B: 80 x 14336 (layers x hidden)
265
+
266
+ ```
267
+ NHIDDEN=14336; NLAYERS=80; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
268
+ Model size: 199B, ratio=179
269
+ ```
270
+
271
+ which gives us:
272
+
273
+ ```
274
+ NHIDDEN=14336; VOCAB_SIZE=150257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}MB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}MB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}MB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}MB')"
275
+ emb size: 2154.08M/38773.52MB, per gpu 269.26M/4846.69MB
276
+ blk size: 2466.44M/44395.87MB, per gpu 308.30M/5549.48MB
277
+ ```
278
+
279
+ TP=8, PP=10 - 80 gpus for one replica, can fit DP=4 (320/384)
280
+
281
+ so with PP=10, we get 80/10 = 8 stages per gpu = 44GB for normal layer gpus and 50GB for the 1st/last gpus due to 5G embedding, the remaining 28GB for activations (2GB is cuda kernels) - could be enough, but not sure.
282
+
283
+ If we are tight, consider giving the embedding its own layer so that the total number of layers to place will be NLAYERS+2. In which case we need to set NLAYERS to 2 less than the wanted number to be able to spread out the layers evenly across gpus.
284
+
285
+ Also consider that the more tightly we pack each gpu the more PP stages it'll have - the slower it'll run.
286
+
287
+ And fewer GPUs means less processing power - so overall it's likely to be slower.
288
+
289
+ ### 206B: 96 x 13312 (layers x hidden)
290
+
291
+ ```
292
+ NHIDDEN=13312; NLAYERS=96; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')"
293
+ Model size: 206B, ratio=138
294
+ ```
295
+
296
+ ```
297
+ NHIDDEN=13312; VOCAB_SIZE=150257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}MB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}MB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}MB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}MB')"
298
+ emb size: 2000.22M/36003.98MB, per gpu 250.03M/4500.50MB
299
+ blk size: 2126.69M/38280.33MB, per gpu 265.84M/4785.04MB
300
+ ```
301
+
302
+ TP=8, PP=12 => 96 gpus for one replica, can fit DP=4 (384/384)
303
+
304
+ 96/12 = 8 stages per gpu = ~40GB per gpu, same number of PP stages per gpu and more spare memory
305
+
306
+ This might be a better fit memory-wise if the one above is too close to being full, especially on gpu 0 and -1.
307
+
308
+ It also uses the full 384 gpu allocation in a snug way.
309
+
310
+
311
+
312
+ ## Train time estimation
313
+
314
+ The A100 spec is 312 TFLOPS for BF16, so the best one could hope for is about 50% of that, i.e. 150 TFLOPs (which we probably won't reach). 150 is a bit too optimistic, but let's use it as the best case scenario.
315
+
316
+
317
+ Also we still don't know how many gpus we will end up using, but let's say we use them all - 350. Once we decide on the topology we will be able to replace 350 with the actual number of gpus we plan to use.
318
+
319
+ ```
320
+ $ python -c 'print(f"{8*300*200_000_000/(350*150)/(3600*24):0.2f}", "days")'
321
+ 105.82 days
322
+ ```
323
+
324
+ So about 3.5 months in the best case scenario. But more likely 150-200 days, since we will get less of everything plus potential issues. We will know more once we get access to 1 replica, as then we should get a much better TFLOPs estimation, which will then be somewhat lower for DP>1.
325
+
326
+ And this estimate is w/o encountering any problems, which is unlikely, so add more overhead for rollbacks and restarts.
327
+
328
+ Additionally this number is too optimistic since we won't have the full number of GPUs until around the end of February.
329
+
330
+ See [Estimate total training time](../../math#estimate-total-training-time) for details of the math.
331
+
332
+ XXX: actually are we training for 300B or 400B tokens because of Multi-Lingual? in which case it'll be 1/3 longer!
333
+
334
+
335
+ ## Allocated hours sufficiency check
336
+
337
+ We currently have about 3M gpu hours left in our allocation.
338
+
339
+ Let's see how many total gpus hours the good estimation is:
340
+
341
+
342
+ ```
343
+ python -c 'print(f"{8*300*200_000_000/150/3600:0.2f}", "compute hours")'
344
+ 888888.89 compute hours
345
+ ```
346
+ So if it takes 2x longer than the best case scenario, then say we need about 2M hours, so we are fine there.
347
+
348
+ Important nuance:
349
+
350
+ We will have an exclusive access only till May, and in May we will have to share with others.
351
+
352
+ So at the moment we will have only about 3 months of having access to all gpus.
353
+
354
+
355
+
356
+ ## Best TFLOPs
357
+
358
+ To measure the best TFLOPs possible use a single node, so that it uses only the intra-node connections (NVLink) and doesn't touch the network:
359
+
360
+ ### fp16
361
+
362
+ - 1 node, 1 replica
363
+
364
+ 20B model: TP=8, PP=1, NLAYERS=8, NHIDDEN=14400, NHEADS=32, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048
365
+
366
+ ```
367
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 769.99 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.384045E+01 | loss scale: 4096.0 | grad norm: 15906.210 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.660 | TFLOPs: 108.47 |
368
+ ```
369
+
370
+ - 10 nodes, 1 replica
371
+
372
+ 200B model: TP=8, PP=10, NLAYERS=80, NHIDDEN=14400, NHEADS=96, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048
373
+
374
+ ```
375
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 844.81 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.373861E+01 | loss scale: 4096.0 | grad norm: 34132.119 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.424 | TFLOPs: 98.87 |
376
+ ```
377
+
378
+ - 20 nodes, 2 replicas
379
+
380
+ ```
381
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 430.21 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.373876E+01 | loss scale: 4096.0 | grad norm: 34027.311 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 4.761 | TFLOPs: 97.07 |
382
+ ```
383
+
384
+ It was puzzling why much less memory was used for an identical setup with DP=2 vs DP=1 - but it's because of ZeRO-1, which saves a lot of memory by sharding the optimizer states across all GPUs!
385
+
386
+
387
+ | GPUs | Size | DP | TP | PP | MBS | Mem | TFLOPs | Notes |
388
+ | ---: | ---: | -: | -: | -: | --: | ---: | -----: | ----: |
389
+ | 8 | 20B | 1 | 8 | 1 | 1 | 67GB | 108.47 | 02-17 |
390
+ | 80 | 200B | 1 | 8 | 10 | 1 | 73GB | 98.87 | 02-17 |
391
+ | 160 | 200B | 2 | 8 | 10 | 1 | 51GB | 97.07 | 02-17 |
392
+ | | | | | | | | | |
393
+
394
+ *Mem = max memory used by the first (last) nodes with the word embedding matrix - max is 77GB
395
+
396
+
397
+ ### bf16
398
+
399
+ - 1 node, 1 replica
400
+
401
+ 20B model: TP=8, PP=1, NLAYERS=8, NHIDDEN=14400, NHEADS=32, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048
402
+
403
+ ```
404
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 777.09 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.381926E+01 | loss scale: 1.0 | grad norm: 2.763 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.635 | TFLOPs: 107.48 |
405
+ ```
406
+
407
+
408
+ - 10 nodes, 1 replica
409
+
410
+ 200B model: TP=8, PP=10, NLAYERS=80, NHIDDEN=14400, NHEADS=96, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048
411
+
412
+ ```
413
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 853.81 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.369443E+01 | loss scale: 1.0 | grad norm: 4.461 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.399 | TFLOPs: 97.82 |
414
+ ```
415
+
416
+
417
+ - 20 nodes, 2 replicas
418
+
419
+
420
+ ```
421
+ iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 434.14 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.369444E+01 | loss scale: 1.0 | grad norm: 6.314 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 4.717 | TFLOPs: 96.19 |
422
+ ```
423
+
424
+
425
+ | GPUs | Size | DP | TP | PP | MBS | Mem | TFLOPs | Notes |
426
+ | ---: | ---: | -: | -: | -: | --: | ---: | -----: | ----: |
427
+ | 8 | 20B | 1 | 8 | 1 | 1 | 68GB | 107.48 | 02-17 |
428
+ | 80 | 200B | 1 | 8 | 10 | 1 | 75GB | 97.82 | 02-17 |
429
+ | 160 | 200B | 2 | 8 | 10 | 1 | 53GB | 96.19 | 02-17 |
430
+ | | | | | | | | | |
431
+
432
+ *Mem = max memory used by the first (last) nodes with the word embedding matrix - max is 77GB
433
+
434
+ So we can load more stages as we get higher DP as ZeRO spreads out over more gpus - smaller shards.
435
+
436
+
437
+
438
+ ## dealing with JZ hanging on the large model
439
+
440
+ This overcomes the hanging, though in general it should lead to a slower throughput, since all CUDA operations become synchronous and block until they are done.
441
+
442
+ ```
443
+ export CUDA_LAUNCH_BLOCKING=1
444
+ ```
445
+
446
+ 200B, measuring 2nd iter:
447
+
448
+ | GPUs | async | GBS | TFLOPs | Notes |
449
+ | ---: | ----: | ---: | -----: | -----------: |
450
+ | 80 | no | 512 | 91.04 | |
451
+ | 80 | yes | 512 | 97.20 | |
452
+ | 160 | no | 512 | 84.59 | |
453
+ | 160 | yes | 512 | 84.44 | |
454
+ | 160 | no | 2048 | 90.29 | |
455
+ | 160 | yes | 2048 | 90.25 | may hang |
456
+ | 320 | no | 2048 | 87.78 | |
457
+ | 320 | yes | 2048 | xxxx | always hangs |
458
+ | | | | | |
459
+
460
+ async/yes == `CUDA_LAUNCH_BLOCKING=0`
461
+
462
+ Interesting. Sometimes `CUDA_LAUNCH_BLOCKING=1` impacts the speed, at other times it doesn't. Perhaps with larger setups it barely has an impact since there are a lot more comms than in the small setup.
463
+
464
+
465
+ ## Choosing the fastest 3D Topology
466
+
467
+ Benchmarking the fastest 3D topology. Constraint: can use at most 48 nodes of 8 gpu a100 80gb nodes.
468
+
469
+ Note that we want not the highest TFLOPs but the fastest time per iteration, since one can get high TFLOPs on fewer GPUs and yet an overall slower speed - we only care about how fast we can finish the training.
470
+
471
+ Also note that the model size isn't always the same, since the number of layers had to be tweaked to fit PP while NHIDDEN was fixed - so speed/tflops can't be compared exactly, though the sizes can be brought back in line by tweaking NHIDDEN. Also, to finish this process quickly, I take the snapshot of a single iteration (always the 2nd), so the data isn't exact and can fluctuate a bit. But the point of this exercise is to get a feel for which topology is superior.
472
+
473
+
474
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
475
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
476
+ | 48 | 200B | 12 | 8 | 4 | 1 | 2040 | 47GB | 189.06 | 91.67 | 02-20 |
477
+ | 45 | 200B | 9 | 8 | 5 | 1 | 2043 | 44GB | 208.40 | 88.84 | 02-20 |
478
+ | 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 |
479
+ | 42 | 191B | 6 | 8 | 7 | 1 | 2046 | 39GB | 202.99 | 94.20 | 02-20 |
480
+ | 48 | 200B | 6 | 8 | 8 | 1 | 2046 | 36GB | 185.75 | 93.59 | 02-20 |
481
+ | 45 | 205B | 5 | 8 | 9 | 1 | 2045 | 37GB | 199.14 | 94.23 | 02-20 |
482
+ | 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 |
483
+ | 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 92.67 | 02-20 |
484
+ | 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 |
485
+ | | | | | | | | | | | |
486
+
487
+ * Sec/it throughput at iteration 2
488
+
489
+ As you can see the 80GB is totally unnecessary for MBS=1 as we are bound by the compute of each gpu, we barely use half the gpu memory, and trying to pack more onto each gpu slows the ensemble down. This is of course thanks to ZeRO which shards all fp32 optim+grad+params over all gpus - so the more gpus you use the less memory is needed to accommodate the same model size, regardless of DP/TP/PP topology (with MBS=1, that is, so that the activations don't take up too much memory).
490
+
491
+ This table doesn't take into account the batch size rampup, which needs to be divisible by DP as it progresses from 32, 64, ... - so really we have the additional constraints of `DP % 4 = 0` and `GBS % 32 = 0`.
492
+
493
+ which means from the above list only a few configs are suitable, and these are:
494
+
495
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
496
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
497
+ | 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 |
498
+ | 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 |
499
+ | 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 92.67 | 02-20 |
500
+ | 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 |
501
+ | | | | | | | | | | | |
502
+
503
+ Increasing MBS will speed things up a bit, and we have a ton of spare memory to accommodate a larger MBS, but we have to ensure we get the batch size rampup sorted out. So if the rampup steps are in increments of 32, with DP=4 the highest MBS is 8, and `log2(MBS) % 2 = 0`.
504
+
505
+
506
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
507
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
508
+ | 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 |
509
+ | 48 | 194B | 8 | 8 | 6 | 2 | 2048 | 45GB | 172.36 | 98.43 | 02-20 |
510
+ | 48 | 194B | 8 | 8 | 6 | 4 | 2048 | 56GB | 173.92 | 97.55 | 02-20 |
511
+ | 48 | 194B | 8 | 8 | 6 | 8 | 2048 | 75GB | 192.42 | 88.17 | 02-20 |
512
+ | | | | | | | | | | | |
513
+
514
+
515
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
516
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ---------------------: |
517
+ | 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 |
518
+ | 40 | 200B | 4 | 8 | 10 | 2 | 2048 | 43GB | 207.92 | 100.43 | 02-20 |
519
+ | 40 | 200B | 4 | 8 | 10 | 4 | 2048 | 55GB | 208.18 | 100.30 | 02-20 |
520
+ | 40 | 200B | 4 | 8 | 10 | 8 | 2048 | 76GB | 229.69 | 90.91 | 02-20 too close to OOM |
521
+ | | | | | | | | | | | |
522
+
523
+
524
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
525
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
526
+ | 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 92.67 | 02-20 |
527
+ | 44 | 195B | 4 | 8 | 11 | 2 | 2048 | 41GB | 186.65 | 97.89 | 02-20 |
528
+ | 44 | 195B | 4 | 8 | 11 | 4 | 2048 | 53GB | 185.79 | 98.34 | 02-20 |
529
+ | 44 | 195B | 4 | 8 | 11 | 8 | 2048 | 75GB | 206.62 | 88.42 | 02-20 |
530
+ | | | | | | | | | | | |
531
+
532
+
533
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
534
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
535
+ | 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 |
536
+ | 48 | 183B | 4 | 8 | 12 | 2 | 2048 | 39GB | 161.96 | 96.69 | 02-20 |
537
+ | 48 | 183B | 4 | 8 | 12 | 4 | 2048 | 50GB | 163.32 | 95.89 | 02-20 |
538
+ | | | | | | | | | | | |
539
+
540
+ The models are slightly different in size so can't compare absolute numbers.
541
+
542
+ But clearly MBS=2 is about the best, MBS=4 is close by.
543
+
544
+ If we utilize all 48 nodes then we have PP6 and PP12 as contenders.
545
+
546
+
547
+ ## tile and wave quantization
548
+
549
+
550
+ A100 80GB has 108 SMs
551
+
552
+ https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#tile-quant
553
+
554
+ ```
555
+ nhidden % 128 = 0
556
+ ```
557
+
558
+ https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#wave-quant
559
+
560
+ ```
561
+ nhidden % 108 = 0
562
+ ```
563
+
564
+ TP=8:
565
+
566
+ ```
567
+ nhidden % 8 = 0
568
+ ```
569
+
570
+ Combining all 3:
571
+
572
+ ```
573
+ nhidden = 108*8*c = 864*c
574
+ ```
575
+
576
+ which gives 864*16 = 13824 (187B) => so let's try to compare with 14400 (200B)
577
+
578
+ XXX: This is a total guestimate - need proper math
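+
+ A quick sketch (our own) to check the NHIDDEN candidates used in this document against the three divisibility rules above:
+
+ ```
+ for h in (12288, 13312, 13824, 14336, 14400):
+     print(f"{h}: %128={h % 128 == 0}  %108={h % 108 == 0}  %8={h % 8 == 0}")
+ ```
+
+ Only 13824 satisfies all three in this range.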
579
+
580
+ | Nodes | Size | NHIDDEN | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
581
+ | ----: | ---: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
582
+ | 40 | 200B | 14400 | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 |
583
+ | 40 | 187B | 13824 | 4 | 8 | 10 | 1 | 2048 | 33GB | 160.29 | 120.05 | 02-20 |
584
+ | 40 | 187B | 13824 | 4 | 8 | 10 | 2 | 2048 | 39GB | 151.07 | 127.38 | 02-20 |
585
+ | 40 | 187B | 13824 | 4 | 8 | 10 | 4 | 2048 | 53GB | 147.43 | 130.53 | 02-20 |
586
+ | 40 | 187B | 13824 | 4 | 8 | 10 | 8 | 2048 | 73GB | 152.51 | 126.18 | 02-20 |
587
+ | | | | | | | | | | | | |
588
+
589
+
590
+ ## TFLOPs calculation improved
591
+
592
+ Until now we used an estimated TFLOPs calculator which was under-reporting the real TFLOPs. And we couldn't compare those to the TFLOPs reported by [Megatron-LM](https://github.com/NVIDIA/Megatron-LM#readme).
593
+
594
+ Deepak Narayanan fixed this here: https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/251
595
+
596
+ So from here on all the TFLOPs reports will be about 3% higher - so can't exactly compare to the earlier numbers in this document.
597
+
598
+
599
+ ## 48 node contenders
600
+
601
+ So we have 2 set ups that fit well into 48 nodes - and that's PP=6/DP=8 or PP=12/DP=4
602
+
603
+ NHIDDEN=14336 / NLAYERS=72
604
+
605
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
606
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
607
+ | 48 | 181B | 4 | 8 | 12 | 1 | 2048 | 29GB | 143.31 | 112.49 | 02-21 |
608
+ | 48 | 181B | 4 | 8 | 12 | 2 | 2048 | 37GB | 134.02 | 120.29 | 02-21 |
609
+ | 48 | 181B | 4 | 8 | 12 | 4 | 2048 | 49GB | 123.69 | 130.34 | 02-21 |
610
+ | 48 | 181B | 4 | 8 | 12 | 8 | 2048 | 69GB | 129.26 | 124.72 | 02-21 |
611
+ | | | | | | | | | | | |
612
+
613
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
614
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
615
+ | 48 | 181B | 8 | 8 | 6 | 1 | 2048 | 38GB | 139.82 | 115.31 | 02-21 |
616
+ | 48 | 181B | 8 | 8 | 6 | 2 | 2048 | 44GB | 131.02 | 123.05 | 02-21 |
617
+ | 48 | 181B | 8 | 8 | 6 | 4 | 2048 | 56GB | 121.48 | 132.71 | 02-21 |
618
+ | | | | | | | | | | | |
619
+
620
+
621
+ So it's either:
622
+
623
+ * DP=4, PP=12, MBS=4: 123 secs/it | 130 TFLOPS
624
+ * DP=8, PP=06, MBS=4: 121 secs/it | 133 TFLOPS
625
+
626
+ Let's compare again with another setup:
627
+
628
+ NHIDDEN=13824 / NLAYERS=84
629
+
630
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
631
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
632
+ | 48 | 196B | 4 | 8 | 12 | 2 | 2048 | 39GB | 143.89 | 121.45 | 02-21 |
633
+ | 48 | 196B | 4 | 8 | 12 | 4 | 2048 | 52GB | 133.12 | 131.27 | 02-21 |
634
+ | 48 | 196B | 8 | 8 | 6 | 2 | 2048 | 65GB | 141.41 | 123.58 | 02-21 |
635
+ | 48 | 196B | 8 | 8 | 6 | 4 | 2048 | 56GB | 130.31 | 134.11 | 02-21 |
636
+ | | | | | | | | | | | |
637
+
638
+ This one has 15% more layers than the previous tables, so here the less-PP-stages setup wins, that is:
639
+
640
+ * DP=8, PP=06, MBS=4: 130.31 secs/it | 134.11 TFLOPS
641
+
642
+ The following has so far given the highest TFLOPs, as we are packing more into fewer GPUs (64 gpus are left out), but of course the total speed per iteration is much slower. So the key metric is the iteration speed and not TFLOPs.
643
+
644
+ NHIDDEN=13824 / NLAYERS=80
645
+
646
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
647
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
648
+ | 40 | 187B | 8 | 8 | 10 | 4 | 2048 | GB | 147.04 | 135.92 | 02-21 |
649
+ | | | | | | | | | | | |
650
+
651
+
652
+ Max possible TFLOPs check for `NHIDDEN=14336`:
653
+
654
+ NHIDDEN=14336 / NLAYERS=6 / GBS=512
655
+
656
+ | Nodes | Size | Layers | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
657
+ | ----: | ---: | -----: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
658
+ | 1 | 18B | 6 | 8 | 1 | 2 | 2048 | 54GB | 130.43 | 143.48 | 02-21 |
659
+ | 1 | 18B | 6 | 8 | 1 | 2 | 2048 | 54GB | 119.19 | 157.02 | 02-21 |
660
+ | 1 | 18B | 10 | 8 | 1 | 1 | 2048 | 80GB | 205.52 | 142.59 | 02-21 |
661
+ | | | | | | | | | | | |
662
+
663
+ Trying with ZeRO_STAGE=0/1
664
+
665
+ NHIDDEN=14336 / NLAYERS=72
666
+
667
+ | Nodes | Size | ZS | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
668
+ | ----: | ---: | -: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
669
+ | 48 | 181B | 1 | 4 | 8 | 12 | 2 | 2048 | 37GB | 134.02 | 120.29 | 02-21 |
670
+ | 48 | 181B | 0 | 4 | 8 | 12 | 2 | 2048 | 72GB | 137.34 | 113.02 | 02-21 |
671
+ | | | | | | | | | | | | |
672
+
673
+ * ZS = ZERO_STAGE
674
+
675
+ XXX: currently can't test `ZeRO_STAGE=0` on master, or `ZeRO_STAGE=1` on the special branch - so need to retest the above on the same branch.
676
+
677
+
678
+ ## Final round comparison
679
+
680
+ all NHEADS=64 (above too)
681
+
682
+ NHIDDEN=12288 / NLAYERS=96
683
+
684
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
685
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
686
+ | 48 | 177B | 8 | 8 | 6 | 2 | 2048 | GB | 136.73 | 115.73 | 02-23 |
687
+ | 48 | 177B | 8 | 8 | 6 | 4 | 2048 | GB | 122.96 | 128.69 | 02-23 |
688
+ | | | | | | | | | | | |
689
+ | | | | | | | | | | | |
690
+
691
+ NHIDDEN=13312 / NLAYERS=84
692
+
693
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
694
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
695
+ | 48 | 182B | 4 | 8 | 12 | 4 | 2048 | GB | 125.52 | 129.29 | 02-23 |
696
+ | 48 | 182B | 8 | 8 | 6 | 2 | 2048 | GB | 135.55 | 119.72 | 02-23 |
697
+ | 48 | 182B | 8 | 8 | 6 | 4 | 2048 | GB | 122.93 | 132.00 | 02-23 |
698
+ | | | | | | | | | | | |
699
+
700
+ NHIDDEN=13824 / NLAYERS=78
701
+
702
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
703
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
704
+ | 48 | 182B | 8 | 8 | 6 | 4 | 2048 | GB | 121.28 | 133.93 | 02-23 |
705
+ | | | | | | | | | | | |
706
+
707
+ NHIDDEN=14336 / NLAYERS=72
708
+
709
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
710
+ | ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: |
711
+ | 48 | 181B | 4 | 8 | 12 | 4 | 2048 | GB | 123.79 | 130.24 | 02-23 |
712
+ | 48 | 181B | 8 | 8 | 6 | 4 | 2048 | GB | 120.85 | 133.40 | 02-23 |
713
+ | | | | | | | | | | | |
714
+
715
+
716
+ ## NHEADs comparison
717
+
718
+ NHIDDEN=14336 / NLAYERS=72
719
+
720
+ Not many variations around 100, as `14336 = 2**11*7` and the constraint is `(HEADS/TP)*MBS % 4 == 0`, i.e. for `MBS=4, TP=8`: `HEADS % 16 == 0` (see the enumeration sketch below).
721
+
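+ A quick way to enumerate the admissible head counts (a sketch that just applies the constraints as stated above: NHIDDEN divisible by NHEADS, NHEADS divisible by TP, and `(NHEADS/TP)*MBS % 4 == 0`):
+
+ ```
+ # Enumerate head counts compatible with the fused-softmax kernel constraint above.
+ def valid_heads(nhidden, tp=8, mbs=4):
+     return [h for h in range(tp, nhidden + 1)
+             if nhidden % h == 0 and h % tp == 0 and (h // tp) * mbs % 4 == 0]
+
+ print(valid_heads(14336))  # 14336 = 2**11 * 7: only 112 and 128 land near 100
+ print(valid_heads(13824))  # 13824 = 2**9 * 3**3: 96 qualifies, 108 does not
+ ```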
722
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
723
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
724
+ | 48 | 181B | 8 | 8 | 6 | 4 | 16 | 2048 | 54GB | 121.03 | 133.20 | 02-24 |
725
+ | 48 | 181B | 8 | 8 | 6 | 4 | 32 | 2048 | 55GB | 124.01 | 130.00 | 02-23 |
726
+ | 48 | 181B | 8 | 8 | 6 | 4 | 64 | 2048 | 55GB | 120.18 | 134.15 | 02-23 |
727
+ | 48 | 181B | 8 | 8 | 6 | 4 | 112 | 2048 | 53GB | 138.72 | 116.21 | 02-23 |
728
+ | 48 | 181B | 8 | 8 | 6 | 4 | 128 | 2048 | 55GB | 124.89 | 129.08 | 02-23 |
729
+ | 48 | 181B | 8 | 8 | 6 | 4 | 256 | 2048 | 54GB | 132.85 | 121.35 | 02-24 |
730
+ | | | | | | | | | | | | |
731
+
732
+ NHIDDEN=13824 / NLAYERS=78
733
+
734
+ here `13824 = 2**9*3**3`
735
+
736
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
737
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
738
+ | 48 | 182B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 121.28 | 133.93 | 02-23 |
739
+ | 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | 59GB | 124.75 | 130.21 | 02-23 |
740
+ | 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | 54GB | 162.72 | 99.82 | 02-23 |
741
+ | | | | | | | | | | | | |
742
+
743
+ NHEADS=108 breaks the constraints for invoking the optimized fused softmax kernel.
744
+
745
+
746
+ NHIDDEN=13312 / NLAYERS=84
747
+
748
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
749
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
750
+ | 48 | 182B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 122.93 | 132.00 | 02-23 |
751
+ | 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | GB | 129.17 | 125.63 | 02-23 |
752
+ | | | | | | | | | | | | |
753
+
754
+
755
+ NHIDDEN=12288 / NLAYERS=96
756
+
757
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
758
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
759
+ | 48 | 177B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 122.96 | 128.69 | 02-24 |
760
+ | 48 | 177B | 8 | 8 | 6 | 4 | 96 | 2048 | GB | 145.40 | 108.83 | 02-24 |
761
+ | 48 | 177B | 8 | 8 | 6 | 4 | 128 | 2048 | GB | 129.42 | 122.27 | 02-24 |
762
+ | | | | | | | | | | | | |
763
+
764
+
765
+ ## GBS Variations
766
+
767
+ Note: the A100s' PCI-Express/NUMA setup was improved today, so all TFLOPs have changed for the better (1-5%) - thus do not compare today's numbers to yesterday's.
768
+
769
+ NLAYERS=72
770
+ NHIDDEN=14336
771
+ NHEADS=64
772
+
773
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
774
+ | ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: |
775
+ | 48 | 181B | 8 | 8 | 6 | 4 | 1568 | 56GB | 113.01 | 109.22 | 02-25 |
776
+ | 48 | 181B | 8 | 8 | 6 | 4 | 2048 | 55GB | 114.11 | 141.27 | 02-25 |
777
+ | 48 | 181B | 8 | 8 | 6 | 6 | 2016 | 66GB | 123.57 | 128.43 | 02-25 |
778
+ | 48 | 181B | 4 | 8 | 12 | 4 | 1568 | GB | 92.75 | 133.08 | 02-25 |
779
+ | 48 | 181B | 4 | 8 | 12 | 4 | 2048 | 49GB | 117.07 | 137.70 | 02-25 |
780
+ | 48 | 181B | 4 | 8 | 12 | 2 | 1568 | GB | 99.93 | 123.51 | 02-25 |
781
+ | 48 | 181B | 4 | 8 | 12 | 2 | 2048 | GB | 128.82 | 125.15 | 02-25 |
782
+ | | | | | | | | | | | |
783
+
784
+ some more configs with lower PP:
785
+
786
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
787
+ | ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: |
788
+ | 48 | 181B | 6 | 8 | 8 | 4 | 2016 | 52GB | 113.16 | 140.24 | 02-25 |
789
+ | 48 | 181B | 12 | 8 | 4 | 2 | 2016 | 53GB | 125.52 | 126.43 | 02-25 |
790
+ | 48 | 181B | 12 | 8 | 4 | 4 | 2016 | 59GB | 114.81 | 138.22 | 02-25 |
791
+ | 48 | 181B | 24 | 8 | 2 | 1 | 2016 | 65GB | 145.45 | 109.11 | 02-25 |
792
+ | 48 | 181B | 24 | 8 | 2 | 2 | 2016 | 76GB | 136.13 | 116.58 | 02-25 |
793
+ | 48 | 181B | 48 | 8 | 1 | 1 | 2016 | OOM | | | 02-25 |
794
+ | | | | | | | | | | | |
795
+
796
+ Tweaking TP for the first time, moving away from the assumption that TP=8 is best. If the model fits with a smaller TP it should be faster! (The GPU accounting for these configs is sanity-checked below.)
797
+
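+ (Every configuration below still has to use all the allocated GPUs, i.e. `DP * TP * PP == nodes * 8`; a trivial check, assuming 8 GPUs per node:)
+
+ ```
+ # All 48-node configs multiply out to 384 GPUs, however TP is traded for DP/PP.
+ nodes, gpus_per_node = 48, 8
+ for dp, tp, pp in [(8, 4, 12), (16, 4, 6), (16, 2, 12), (32, 2, 6), (8, 2, 24), (4, 4, 24)]:
+     assert dp * tp * pp == nodes * gpus_per_node
+ ```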
798
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
799
+ | ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: |
800
+ | 48 | 181B | 8 | 4 | 12 | 4 | 2048 | 60GB | 111.89 | 144.08 | 02-25 |
801
+ | 48 | 181B | 8 | 4 | 12 | 2 | 2048 | 44GB | 110.48 | 145.92 | 02-25 |
802
+ | 48 | 181B | 8 | 4 | 12 | 2 | 2048 | 38GB | 113.54 | 141.99 | 02-25 |
803
+ | 48 | 181B | 16 | 4 | 6 | 4 | 2048 | 75GB | 117.11 | 137.66 | 02-25 |
804
+ | 48 | 181B | 16 | 4 | 6 | 2 | 2048 | 57GB | 111.71 | 144.31 | 02-25 |
805
+ | 48 | 181B | 16 | 2 | 12 | 2 | 2048 | 63GB | 112.50 | 143.30 | 02-25 |
806
+ | 48 | 181B | 32 | 2 | 6 | 2 | 2048 | OOM | | | 02-25 |
807
+ | 48 | 181B | 32 | 2 | 6 | 1 | 2048 | OOM | | | 02-25 |
808
+ | 48 | 181B | 8 | 2 | 24 | 1 | 2048 | 44GB | 119.53 | 134.88 | 02-25 |
809
+ | 48 | 181B | 8 | 2 | 24 | 2 | 2048 | 53GB | 122.75 | 131.33 | 02-25 |
810
+ | 48 | 181B | 4 | 4 | 24 | 1 | 2048 | GB | 130.60 | 123.44 | 02-25 |
811
+ | | | | | | | | | | | |
812
+
813
+
814
+ NHIDDEN=12288 / NLAYERS=96
815
+
816
+ | Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes |
817
+ | ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: |
818
+ | 48 | 177B | 8 | 1 | 48 | 1 | 2048 | 58GB | 142.17 | 111.30 | 02-25 |
819
+ | | | | | | | | | | | |
820
+
821
+
822
+ ## Another round of NHEADS
823
+
824
+ to retest with TP<8 variations
825
+
826
+ NHIDDEN=13824 / NLAYERS=78
827
+
828
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
829
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
830
+ | 48 | 182B | 8 | 4 | 12 | 1 | 64 | 2048 | | 148.24 | 109.57 | 02-26 |
831
+ | 48 | 182B | 8 | 4 | 12 | 2 | 64 | 2048 | 48GB | 103.51 | 156.92 | 02-26 |
832
+ | 48 | 182B | 8 | 4 | 12 | 2 | 96 | 2048 | 48GB | 107.12 | 151.64 | 02-26 |
833
+ | 48 | 182B | 8 | 4 | 12 | 2 | 128 | 2048 | | 147.41 | 110.19 | 02-26 |
834
+ | 48 | 182B | 8 | 4 | 12 | 4 | 64 | 2048 | | 106.72 | 152.21 | 02-26 |
835
+ | 48 | 182B | 8 | 4 | 12 | 4 | 96 | 2048 | | 110.31 | 147.25 | 02-26 |
836
+ | 48 | 182B | 8 | 4 | 12 | 4 | 128 | 2048 | | 153.90 | 105.54 | 02-26 |
837
+ | 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | | 118.12 | 137.51 | 02-26 |
838
+ | 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | | 156.84 | 103.56 | 02-26 |
839
+ | | | | | | | | | | | | |
840
+
841
+ NHIDDEN=14336 / NLAYERS=72
842
+
843
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
844
+ | ----: | ---: | -: | -: | -: | --: | -----: | ---: | ---: | -----: | -----: | ----: |
845
+ | 48 | 181B | 8 | 4 | 12 | 2 | 64 | 2048 | | 110.42 | 146.00 | 02-26 |
846
+ | 48 | 181B | 8 | 4 | 12 | 2 | 128 | 2048 | | 114.02 | 141.39 | 02-26 |
847
+ | 48 | 181B | 8 | 4 | 12 | 4 | 128 | 2048 | | 137.53 | 117.23 | 02-26 |
848
+ | 48 | 181B | 8 | 8 | 6 | 4 | 64 | 2048 | | 113.95 | 141.47 | 02-26 |
849
+ | 48 | 181B | 8 | 8 | 6 | 4 | 128 | 2048 | | 116.06 | 138.90 | 02-26 |
850
+ | | | | | | | | | | | | |
851
+
852
+ NHIDDEN=13312 / NLAYERS=84
853
+
854
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
855
+ | ----: | ---: | -: | -: | -: | --: | -----: | ---: | ---: | -----: | -----: | ----: |
856
+ | 48 | 182B | 8 | 4 | 12 | 2 | 64 | 2048 | | 103.82 | 156.46 | 02-26 |
857
+ | 48 | 182B | 8 | 4 | 12 | 4 | 64 | 2048 | | 113.21 | 143.34 | 02-26 |
858
+ | 48 | 182B | 8 | 8 | 6 | 2 | 64 | 2048 | | 129.61 | 125.21 | 02-26 |
859
+ | | | | | | | | | | | | |
860
+
861
+ ## Batchsize Warmup
862
+
863
+ NHIDDEN=13824 / NLAYERS=78
864
+
865
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
866
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
867
+ | 48 | 182B | 8 | 4 | 12 | 2 | 96 | 512 | | 35.77 | 113.52 | 02-26 |
868
+ | 48 | 182B | 8 | 4 | 12 | 2 | 96 | 1024 | | 59.65 | 136.15 | 02-26 |
869
+ | 48 | 182B | 8 | 4 | 12 | 2 | 96 | 1536 | | 83.11 | 146.59 | 02-26 |
870
+ | 48 | 182B | 8 | 4 | 12 | 2 | 96 | 2048 | | 107.12 | 151.64 | 02-26 |
871
+ | | | | | | | | | | | | |
872
+
873
+ ## Re-do
874
+
875
+ 78/12=6.5 - so 11 stages hold 7 blocks each while the last stage gets only 1 block, which is uneven (see the sketch below). So that config is not optimal as it wastes GPUs.
876
+
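+ A minimal sketch of the imbalance (assuming the default uniform partitioner gives each stage `ceil(NLAYERS/PP)` blocks until the layers run out):
+
+ ```
+ import math
+
+ nlayers, pp = 78, 12
+ per_stage = math.ceil(nlayers / pp)                 # 7 blocks per stage
+ full_stages, leftover = divmod(nlayers, per_stage)  # 11 full stages, 1 block left over
+ print([per_stage] * full_stages + [leftover])       # [7]*11 + [1] - the last stage is nearly idle
+ ```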
877
+ NHIDDEN=13824 / NLAYERS=78
878
+
879
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
880
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
881
+ | 48 | 182B | 8 | 8 | 6 | 2 | 96 | 2048 | GB | 133.57 | 121.61 | 02-27 |
882
+ | 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | 59GB | 118.24 | 137.38 | 02-27 |
883
+ | 48 | 182B | 16 | 4 | 6 | 2 | 96 | 2048 | GB | | | 02-27 |
884
+ | 48 | 182B | 16 | 4 | 6 | 4 | 96 | 2048 | 75GB | 115.55 | 140.57 | 02-27 |
885
+ | | | | | | | | | | | | |
886
+
887
+ HIDDEN=12288; NLAYERS=106 (regex partition_method='type:transformer|embed')
888
+
889
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
890
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
891
+ | 48 | 195B | 8 | 4 | 12 | 2 | 96 | 2048 | 44GB | 112.69 | 154.86 | 02-27 |
892
+ | 48 | 195B | 8 | 4 | 12 | 2 | 64 | 2048 | GB | 110.96 | 157.27 | 02-27 |
893
+ | | | | | | | | | | | | |
894
+
895
+ ## Rebalancing layers
896
+
897
+ Do not compare these numbers to the previous ones, for 2 reasons:
898
+
899
+ - First, from now on the testing is happening with the BF16 optimizer, which was just written to accumulate gradients in fp32, so it is more memory heavy and a bit slower than fp16, which accumulates gradients in fp16. The additional memory usage is 4 bytes x params and it's not sharded across GPUs.
900
+ - I implemented and enabled `--pp-partition-method 'type:transformer|embedding'`, so we use 2 fewer transformer layers in order that `NLAYERS + 2` divides evenly by PP (see the sketch below), giving a perfect balance where each embedding layer gets its own slot on par with the transformer layers. This is because the 250k-vocab embedding matrix takes as much space as a single transformer layer.
901
+
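+ A small sketch of the balance condition from the second point (assumption: with `type:transformer|embedding` both embedding layers occupy pipeline slots, so `NLAYERS + 2` should divide evenly by PP):
+
+ ```
+ # Check the layer counts used below for perfect pipeline balance.
+ for nlayers, pp in [(106, 12), (100, 6), (82, 12), (94, 12), (70, 12)]:
+     slots = nlayers + 2  # transformer blocks + the 2 embedding layers
+     print(nlayers, pp, slots % pp == 0, slots // pp)
+ ```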
902
+ HIDDEN=12288; NLAYERS=106; Model size: 195B, ratio=115
903
+
904
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
905
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
906
+ | 48 | 195B | 8 | 4 | 12 | 2 | 64 | 2048 | 67GB | 116.54 | 149.75 | 02-28 |
907
+ | 48 | 195B | 8 | 4 | 12 | 2 | 96 | 2048 | 65GB | 118.79 | 146.90 | 02-28 |
908
+ | 48 | 195B | 8 | 4 | 12 | 2 | 128 | 2048 | 67GB | 121.42 | 143.73 | 02-28 |
909
+ | 48 | 195B | 8 | 4 | 12 | 4 | 96 | 2048 | 79GB | 120.34 | 145.01 | 02-28 |
910
+ | | | | | | | | | | | | |
911
+
912
+
913
+ HIDDEN=12288; NLAYERS=100; Model size: 184B, ratio=122
914
+
915
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
916
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
917
+ | 48 | 184B | 16 | 4 | 6 | 2 | 64 | 2048 | OOM | x | x | 02-28 |
918
+ | 48 | 184B | 16 | 4 | 6 | 1 | 64 | 2048 | OOM | x | x | 02-28 |
919
+ | 48 | 184B | 8 | 8 | 6 | 2 | 64 | 2048 | 61GB | 139.72 | 117.91 | 02-28 |
920
+ | 48 | 184B | 8 | 8 | 6 | 4 | 64 | 2048 | 72GB | 120.96 | 136.20 | 02-28 |
921
+ | | | | | | | | | | | | |
922
+
923
+
924
+ NHIDDEN=13312; NLAYERS=82; Model size: 178B, ratio=162
925
+
926
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
927
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
928
+ | 48 | 178B | 4 | 8 | 12 | 4 | 64 | 2048 | 52GB | 111.79 | 141.76 | 02-28 |
929
+ | 48 | 178B | 8 | 4 | 12 | 2 | 64 | 2048 | 63GB | 104.45 | 151.71 | 02-28 |
930
+ | 48 | 178B | 8 | 4 | 12 | 2 | 104 | 2048 | 62GB | 123.71 | 128.10 | 02-28 |
931
+ | 48 | 178B | 8 | 4 | 12 | 2 | 128 | 2048 | 60GB | 108.78 | 145.68 | 02-28 |
932
+ | 48 | 178B | 8 | 4 | 12 | 4 | 64 | 2048 | 74GB | 104.82 | 151.18 | 02-28 |
933
+ | | | | | | | | | | | | |
934
+
935
+ NHIDDEN=13312; NLAYERS=94 Model size: 203B, ratio=141
936
+
937
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
938
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
939
+ | 48 | 203B | 8 | 4 | 12 | 2 | 128 | 2048 | 67GB | 124.10 | 146.12 | 02-28 |
940
+ | | | | | | | | | | | | |
941
+
942
+ NHIDDEN=14336; NLAYERS=70; Model size: 176B, ratio=204
943
+
944
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
945
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
946
+ | 48 | 176B | 4 | 8 | 12 | 2 | 64 | 2048 | 40GB | 121.63 | 128.92 | 02-28 |
947
+ | 48 | 176B | 8 | 4 | 12 | 2 | 64 | 2048 | 59GB | 102.03 | 153.68 | 02-28 |
948
+ | 48 | 176B | 8 | 4 | 12 | 2 | 112 | 2048 | 59GB | 104.50 | 150.05 | 02-28 |
949
+ | 48 | 176B | 8 | 4 | 12 | 2 | 128 | 2048 | 60GB | 105.89 | 148.08 | 02-28 |
950
+ | 48 | 176B | 8 | 4 | 12 | 4 | 64 | 2048 | 73GB | 102.27 | 153.33 | 02-28 |
951
+ | | | | | | | | | | | | |
952
+
953
+ NHIDDEN=14336; NLAYERS=82; Model size: 206B, ratio=174
954
+
955
+ | Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes |
956
+ | ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: |
957
+ | 48 | 206B | 8 | 4 | 12 | 2 | 128 | 2048 | OOM | | | 02-28 |
958
+ | | | | | | | | | | | | |
959
+
960
+
961
+
962
+ (was quickly getting the memory snapshot with: `pdsh -w jean-zay-iam01 "source ~/.pdshrc; nvidia-smi"`)
963
+
964
+
965
+ ## Hanging Issue
966
+
967
+ Here we are dealing with 320-384 A100 GPUs working in ensemble.
968
+
969
+ It appears that the system can't handle heavy NCCL traffic, or something of the sort. It can handle a model under 100B over 40 nodes (TP=8/PP=10/DP=4). It can handle 200B over 10 nodes. At 100B over 20-40 nodes random GPUs stop responding and the whole system hangs until it times out. I was able to test with the same NHIDDEN while growing the model along the layer dimension:
970
+
971
+ - 10 layers - 25B works
972
+ - 20 layers - 50B works
973
+ - 40 layers - 100B hangs after succeeding iteration 1
974
+
975
+ I was just starting to diagnose along the hidden dimension, and now 13/52 nodes are down so I can't continue with this line of work: 40 nodes gave me a reliable failure, while 20 nodes fail only intermittently, which is not good for diagnosing.
976
+
977
+ This is for a single replica of 10 nodes with 200B model + 250k vocab.
978
+
979
+ I think the nodes that crashed and didn't recover are prime suspects for having internal problems. Even though when I tested in groups of 10 nodes everything was dandy - note: with the same 200B model.
980
+ One more data point: DeepSpeed ZeRO shards over all GPUs, so the more GPUs are involved, the more communication happens. This is totally orthogonal to DP.
981
+
982
+ The next day:
983
+
984
+ Most of the nodes have come back this morning so continuing the dimensional growing experiments.
985
+ To recap: growing along the layer dimension while keeping hidden at `1024*14` worked until 40 layers were reached, where it started hanging. So it couldn't handle a 100B model in this dimension.
986
+ Now I'm keeping the layer dimension frozen at 80 and growing the nhidden dimension, starting from `1024*4` - proving that it works and then incrementing the size until it hangs (model sizes estimated as in the sketch below):
987
+
988
+ - `1024*10` works (100B model)
989
+ - `1024*12` hangs (145B model)
990
+
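+ (The model sizes quoted here come from a rough parameter count; a sketch, assuming a standard GPT block, a ~250k vocab and no position embeddings since we use ALiBi:)
+
+ ```
+ # Rough parameter count: 12*h^2 per block (plus small bias/layernorm terms) plus the embedding.
+ def estimate_params(nlayers, nhidden, vocab=250_880):
+     block = 12 * nhidden**2 + 13 * nhidden
+     return nlayers * block + vocab * nhidden
+
+ for h in (1024 * 10, 1024 * 12, 1024 * 14):
+     print(h, f"{estimate_params(80, h) / 1e9:.0f}B")  # ~103B, ~148B, ~201B at 80 layers
+ ```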
991
+ So these 2 experiments both show that when the inter-node traffic exceeds a certain level, the system fails.
992
+
993
+ So it's not the size of each `all_reduce`/`broadcast` packet, since at full NHIDDEN but only 1/4 of the layers everything is just fine.
994
+
995
+ And BTW to get a quick success/failure indication I'm working with `GLOBAL_BATCH_SIZE=64` so PP is very inefficient, but it doesn't matter for the purpose of this experiment.
996
+
997
+ Using `py-spy` on the processes to dump python call stacks I have derived the same story on each node:
998
+
999
+ On each node with TP=8 - i.e. each node is only TP - the same situation: (checked nodes 0 and 1 only)
1000
+
1001
+ 6 processes are in:
1002
+
1003
+ ```
1004
+ Thread 835990 (active): "MainThread"
1005
+ train (megatron/training.py:915)
1006
+ pretrain (megatron/training.py:187)
1007
+ <module> (pretrain_gpt.py:239)
1008
+ ```
1009
+ 2 processes are in:
1010
+ ```
1011
+ Thread 835995 (active): "MainThread"
1012
+ broadcast (torch/distributed/distributed_c10d.py:1191)
1013
+ _aggregate_total_loss (deepspeed/runtime/pipe/engine.py:540)
1014
+ train_batch (deepspeed/runtime/pipe/engine.py:330)
1015
+ train_step (megatron/training.py:436)
1016
+ train (megatron/training.py:851)
1017
+ pretrain (megatron/training.py:187)
1018
+ <module> (pretrain_gpt.py:239)
1019
+ ```
1020
+
1021
+ so 6 processes finished `train_step` and now are trying to:
1022
+ ```
1023
+ torch.distributed.all_reduce(
1024
+ done_cuda, op=torch.distributed.ReduceOp.MAX)
1025
+ ```
1026
+ but for some reason 2 processes never finished the `train_step` and are stuck broadcasting, I presume, to the other 6 processes, which are long gone.
1027
+
1028
+ So this hanging happens partially in DeepSpeed and partially in Megatron-LM: somehow processes get out of sync even though everything works just fine at a smaller scale. The issue could be brought on by apex's `FusedAdam`, as we dealt with a serious issue in it a week earlier, but it could also be pytorch, NCCL or some internal system issue. It's very hard to find the cause.
1029
+
1030
+ As I shared earlier the problem doesn't exist or goes away if either of 2 things happens:
1031
+
1032
+ - the model is under 100B (a short stack of layers or a narrow hidden size) and 20 or more nodes are used in a single job
1033
+ - `CUDA_LAUNCH_BLOCKING=1`
1034
+
1035
+ Topology is TP=8, PP=10, DP=4
1036
+
1037
+ It has been very difficult to diagnose this issue, since every time I run the hanging setup I lose a few nodes, and since I'm 10h behind Jean Zay, nobody is around there to reboot the nodes.
1038
+
1039
+ So first of all, it appears that `CUDA_LAUNCH_BLOCKING=1` removes the hanging issue. I did several performance checks and surprisingly it has no impact on this framework at this scale. Normally it should make things much slower, as it makes CUDA ops synchronous.
1040
+
1041
+ ### py-spying all processes
1042
+
1043
+ After discussing this issue with Samyam I first ran `py-spy` on all processes, but alas several processes weren't responding, so we had no way to tell where they were hanging.
1044
+
1045
+ For posterity here is the process:
1046
+
1047
+
1048
+ In one console, first allocate the gpus:
1049
+ ```
1050
+ salloc --partition=gpu_p5 --constraint=a100 --reservation=hug --nodes=2 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100
1051
+ ```
1052
+ We do that so that if SLURM kills the processes we can still access the nodes.
1053
+
1054
+ Now run the training job, which calls the main `srun` with all the gpus:
1055
+ ```
1056
+ bash 200B-n40-bf16-mono.slurm
1057
+ ```
1058
+
1059
+ Wait till the program hangs.
1060
+
1061
+ Now in another console get the `SLURM_JOBID` (or get it from `salloc` log):
1062
+ ```
1063
+ squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R"
1064
+ ```
1065
+
1066
+ Adjust jobid with `SLURM_JOBID` from above:
1067
+ ```
1068
+ srun --jobid=2180718 --gres=gpu:0 --nodes=40 --tasks-per-node=1 --output=trace-%N.out sh -c 'ps aux | grep python | egrep -v "grep|srun" | grep `whoami` | awk "{print \$2}" | xargs -I {} py-spy dump --native --pid {}' || echo "failed"
1069
+ ```
1070
+
1071
+ You must use `--gres=gpu:0` for the monitoring `srun`, otherwise it will block until the first `srun` exits.
1072
+
1073
+ I also attempted using `pdsh` via `ds_ssh`, but somehow I wasn't able to run `py-spy` remotely - the main issue was that the remote `ssh` command wasn't giving the same env as when I was logged in interactively via `ssh`. But if you have `sudo` access on the compute nodes then you can do the following:
1074
+
1075
+ First prepare `hostfile`:
1076
+ ```
1077
+ function makehostfile() {
1078
+ perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"};
1079
+ $slots=8 if $slots==0; # workaround 8 gpu machines
1080
+ @nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}];
1081
+ print map { "$_ slots=$slots\n" } @nodes'
1082
+ }
1083
+ makehostfile > hostfile
1084
+ ```
1085
+
1086
+ Now run the `py-spy` extraction command over all nodes:
1087
+ ```
1088
+ ds_ssh -f hostfile "source ~/.pdshrc; ps aux | grep python | grep -v grep | grep `whoami` | awk '{print \$2}' | xargs -I {} sudo py-spy dump --pid {} "
1089
+ ```
1090
+
1091
+ ### python trace
1092
+
1093
+ So next came the idea of tracing all calls, like one does with `strace(1)`. I researched python call-tracing facilities and discovered that python has a `trace` module.
1094
+
1095
+ This code will trace all python calls and log them to the console and into a dedicated per-process log file, via a custom `Tee` class I added.
1096
+
1097
+ This can then help to understand where a given process stopped responding, since we will have the log of its last call before it went unresponsive.
1098
+
1099
+ ```
1100
+ $ cat pretrain_gpt.py
1101
+ [...]
1102
+
1103
+ def main():
1104
+ pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
1105
+ args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
1106
+
1107
+ import re
1108
+ class Tee:
1109
+ """
1110
+ A helper class to tee print's output into a file.
1111
+ Usage:
1112
+ sys.stdout = Tee(filename)
1113
+ """
1114
+
1115
+ def __init__(self, filename):
1116
+ self.stdout = sys.stdout
1117
+ self.file = open(filename, "a")
1118
+
1119
+ def __getattr__(self, attr):
1120
+ return getattr(self.stdout, attr)
1121
+
1122
+ def write(self, msg):
1123
+ self.stdout.write(msg)
1124
+ self.file.write(msg)
1125
+ self.file.flush()
1126
+
1127
+ def flush(self):
1128
+ self.stdout.flush()
1129
+ self.file.flush()
1130
+
1131
+ if __name__ == "__main__":
1132
+
1133
+ import sys
1134
+ import trace
1135
+ import socket
1136
+ import os
1137
+
1138
+ # enable to trace
1139
+ if 0:
1140
+ cwd = os.path.realpath('.')
1141
+ pid = os.getpid()
1142
+ hostname = socket.gethostname()
1143
+ local_rank = int(os.environ["LOCAL_RANK"])
1144
+ trace_output_file = f"{cwd}/trace-{hostname}-{local_rank}-{pid}.txt"
1145
+
1146
+ # create a Trace object, telling it what to ignore, and whether to
1147
+ # do tracing or line-counting or both.
1148
+ tracer = trace.Trace(
1149
+ ignoredirs=[sys.prefix, sys.exec_prefix],
1150
+ trace=1,
1151
+ count=1,
1152
+ )
1153
+ # outfile=trace_output_file)
1154
+
1155
+ # run the new command using the given tracer
1156
+ sys.stdout = Tee(trace_output_file)
1157
+ tracer.run('main()')
1158
+ else:
1159
+ main()
1160
+
1161
+ ```
1162
+
1163
+ This code doesn't require any special handling other than enabling the trace by changing `if 0` to `if 1`.
1164
+
1165
+ Of course, this will now dump all python calls. I was worried that the slowdown would mask the issue causing the hanging, but surprisingly it didn't.
1166
+
1167
+ I got 14GB (!) of logged python calls from 320 processes.
1168
+
1169
+ In retrospect I probably should have started the tracing later, probably just before `train_step` - otherwise we get a lot of useless traces of the dataloader and other preliminary code.
1170
+
1171
+ I wish I could tell `trace` which packages to follow, but alas it only supports directories to ignore, which are much more difficult to specify, and thus you end up with a lot more data than you need. Still, this is a super useful tool for debugging hanging processes.
1172
+
1173
+
1174
+ ### To be continued
1175
+
1176
+ We needed to do some more tweaks to get to the root of it.
1177
+
1178
+ Unfortunately I had to pause here, since I had to switch to testing the final version of the code and I couldn't risk losing nodes.
1179
+
1180
+ With the `CUDA_LAUNCH_BLOCKING=1` workaround providing a robust solution, we will use that for the time being.
1181
+
1182
+ # a few preliminary runs
1183
+
1184
+
1185
+ ## main-1
1186
+
1187
+ While the final data is being cleaned up we are doing a few preliminary runs with data that still has some issues.
1188
+
1189
+ GBS ramp up of `--rampup-batch-size 16 16 9_765_625` - the first few stages starting with GBS=16 are really slow (8 TFLOPs). The pipeline doesn't have enough data to even fill all the stages once, so it's super inefficient and it'll take days until we start hitting 100 TFLOPs.
1190
+
1191
+ But there were no spikes during this brief experiment.
1192
+
1193
+
1194
+
1195
+ ## main-2
1196
+
1197
+ Trying `--rampup-batch-size 384 16 9_765_625`, since 384 is the first GBS where the pipe is filled up fully for the first time - `12*2*4=384` (`PP*MBS*DP`); see the pipe-fill sketch below. The throughput starts at 100 TFLOPs right away (and it should be 150 TFLOPs once we reach GBS=2048).
1198
+
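+ A minimal sketch of the pipe-fill condition behind that choice (assumption: each DP replica needs at least PP micro-batches in flight so that every pipeline stage has work; DP=8, MBS=2, PP=12 as in the main config):
+
+ ```
+ # The pipeline is only filled once GBS provides at least PP micro-batches per replica.
+ def pipe_is_filled(gbs, mbs, dp, pp):
+     micro_batches_per_replica = gbs // (mbs * dp)
+     return micro_batches_per_replica >= pp
+
+ print(pipe_is_filled(gbs=16, mbs=2, dp=8, pp=12))   # False - why the GBS=16 ramp-up stages crawl
+ print(pipe_is_filled(gbs=384, mbs=2, dp=8, pp=12))  # True
+ ```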
1199
+ Found a bug: tied weights weren't getting reduced - was getting a spike on restart, fixed at
1200
+ https://github.com/microsoft/DeepSpeed/pull/1801/commits/37011a92bad42b07c2cb742751873ef7073d84b8
1201
+
1202
+ So only the front embedding matrix's grad updates were being applied; the contributions from the end one were ignored.
1203
+
1204
+ Will do a totally new run to compare that it's similar or better.
1205
+
1206
+
1207
+
1208
+
1209
+ ## main-3
1210
+
1211
+ Trying the version rebased onto master, 61d51fd62141ddb51b629b785af256fac407e048, and it has serious issues - the learning is much, much slower.
1212
+
1213
+ ## main-4
1214
+
1215
+ So rolling back `olruwase/bf16-updates` branch to the fix:
1216
+
1217
+ 37011a92bad42b07c2cb742751873ef7073d84b8 Reduce tied weight gradients
1218
+
1219
+ This time the learning is just a tad slower than main-2, so either deepspeed@master introduced some regression or the merge didn't go well.
1220
+
1221
+ Additionally, going to try resuming from the latest checkpoint of `main-3`, as it's further along, to check `main-4` for spikes.
1222
+
1223
+ same spike.
1224
+
1225
+ After analyzing the module weights, it's clear we have 2 distinct issues:
1226
+ 1. `module.tied_modules.embed.word_embeddings.norm` is not taken care of at all on pp rank -1, and its weights get reset to defaults on load
1227
+ 2. `module.tied_modules.embed.word_embeddings.weight` on pp rank -1 mismatches between the end of the last step before save and the start of the first step after load
1228
+
1229
+ This was derived with this debug instrumentation:
1230
+
1231
+ ```
1232
+ diff --git a/megatron/training.py b/megatron/training.py
1233
+ index fd65ae9..fd76d28 100644
1234
+ --- a/megatron/training.py
1235
+ +++ b/megatron/training.py
1236
+ @@ -58,6 +58,23 @@ from megatron.data.dataset_utils import analyze_data_prefix
1237
+
1238
+ import deepspeed
1239
+
1240
+ +def dump_weights(preamble, iteration, model):
1241
+ +
1242
+ + import os
1243
+ + import socket
1244
+ + hostname = socket.gethostname()
1245
+ + pid = os.getpid()
1246
+ +
1247
+ + tp_rank = mpu.get_tensor_model_parallel_rank()
1248
+ + pp_rank = mpu.get_pipeline_model_parallel_rank()
1249
+ + dp_rank = mpu.get_data_parallel_rank()
1250
+ + global_rank = torch.distributed.get_rank()
1251
+ +
1252
+ + fn = f"debug-{iteration}-pp{pp_rank}-tp{tp_rank}-dp{dp_rank}-global{global_rank}-{preamble}-{pid}.txt"
1253
+ + #print(fn)
1254
+ + with open(fn, "w") as fh:
1255
+ + for n, p in model[0].named_parameters():
1256
+ + fh.write(f"{n}={p}\n")
1257
+
1258
+ def print_datetime(string):
1259
+ """Note that this call will sync across all ranks."""
1260
+ @@ -426,6 +443,8 @@ def setup_model_and_optimizer(model_provider_func):
1261
+ if args.fp16:
1262
+ optimizer.reload_model_params()
1263
+
1264
+ + #optimizer.update_lp_params()
1265
+ +
1266
+ return model, optimizer, lr_scheduler
1267
+
1268
+
1269
+ @@ -848,12 +867,18 @@ def train(forward_step_func, model, optimizer, lr_scheduler,
1270
+ args.pipeline_model_parallel_size >= 1:
1271
+ args.curriculum_seqlen = args.curriculum_scheduler.update_difficulty( \
1272
+ args.iteration + 1)
1273
+ +
1274
+ + dump_weights("before-iteration", iteration+1, model)
1275
+ +
1276
+ loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \
1277
+ train_step(forward_step_func,
1278
+ train_data_iterator,
1279
+ model,
1280
+ optimizer,
1281
+ lr_scheduler)
1282
+ +
1283
+ + dump_weights("after-iteration", iteration+1, model)
1284
+ +
1285
+ iteration += 1
1286
+ args.iteration = iteration
1287
+ new_samples = mpu.get_data_parallel_world_size() * \
1288
+ ```
1289
+
1290
+ and then
1291
+ 1. ran 5 iterations and saved a checkpoint, then ran:
1292
+ ```
1293
+ mkdir a; mv debug-* a
1294
+ ```
1295
+ 2. restarted and ran a few iterations, then ran:
1296
+
1297
+ ```
1298
+ mkdir b; mv debug-* b
1299
+ ```
1300
+
1301
+ I basically dumped weights for all ranks before and after train_step
1302
+
1303
+ Now let's compare them all. Comparing:
1304
+ 1. the after-iteration dump of the last step before save (iteration 805 in this example)
1305
+ 2. the before-iteration dump of the first step after the load (on restart) (iteration 806 in this example)
1306
+
1307
+ with the help of:
1308
+ ```
1309
+ perl -le 'print qx[diff -u a/debug-805-*global$_-after-iteration-*.txt b/debug-806-*-global$_-before-iteration-*.txt] for 0..383'
1310
+ ```
1311
+
1312
+ Result: all `a/debug-805-pp11-*-after-iteration-*.txt` and corresponding `b/debug-806-pp11-*-before-iteration-*.txt` mismatch.
1313
+
1314
+ so here is a sample diff:
1315
+ ```
1316
+ --- a/debug-805-pp11-tp1-dp4-global369-after-iteration-377074.txt 2022-03-06 05:44:06.074835000 +0100
1317
+ +++ b/debug-806-pp11-tp1-dp4-global369-before-iteration-378990.txt 2022-03-06 05:48:24.842635000 +0100
1318
+ @@ -1,21 +1,15 @@
1319
+ module.tied_modules.embed.word_embeddings.weight=Parameter containing:
1320
+ -tensor([[-3.1090e-04, 4.6082e-03, -2.3499e-03, ..., -1.1292e-02,
1321
+ - 2.1667e-03, -2.7313e-03],
1322
+ - [-1.1353e-02, 9.9487e-03, -1.9684e-03, ..., -5.4550e-04,
1323
+ - -2.3460e-04, 4.2114e-03],
1324
+ - [ 3.2806e-03, -3.4332e-04, -5.5847e-03, ..., 7.6294e-03,
1325
+ - 1.7853e-03, 2.5868e-05],
1326
+ +tensor([[-0.0006, 0.0046, -0.0024, ..., -0.0114, 0.0014, -0.0030],
1327
+ + [-0.0109, 0.0096, -0.0020, ..., -0.0005, -0.0001, 0.0041],
1328
+ + [ 0.0027, -0.0004, -0.0056, ..., 0.0070, 0.0017, 0.0003],
1329
+ ...,
1330
+ - [ 1.6098e-03, 4.1809e-03, -2.4567e-03, ..., -4.6692e-03,
1331
+ - -4.5776e-03, 1.7090e-03],
1332
+ - [ 5.7373e-03, 3.5858e-03, -1.7471e-03, ..., 2.3041e-03,
1333
+ - -6.4392e-03, 1.0223e-03],
1334
+ - [-1.6937e-03, -1.4038e-02, 2.1057e-03, ..., -3.6011e-03,
1335
+ - 1.3275e-03, -5.8594e-03]], device='cuda:1', dtype=torch.bfloat16,
1336
+ - requires_grad=True)
1337
+ + [ 0.0018, 0.0039, -0.0026, ..., -0.0051, -0.0043, 0.0016],
1338
+ + [ 0.0051, 0.0039, -0.0015, ..., 0.0027, -0.0063, 0.0008],
1339
+ + [-0.0018, -0.0142, 0.0021, ..., -0.0035, 0.0015, -0.0060]],
1340
+ + device='cuda:1', dtype=torch.bfloat16, requires_grad=True)
1341
+ module.tied_modules.embed.word_embeddings.norm.weight=Parameter containing:
1342
+ -tensor([0.9961, 0.9961, 0.9961, ..., 0.9961, 0.9961, 0.9961], device='cuda:1',
1343
+ - dtype=torch.bfloat16, requires_grad=True)
1344
+ +tensor([1., 1., 1., ..., 1., 1., 1.], device='cuda:1', dtype=torch.bfloat16,
1345
+ + requires_grad=True)
1346
+ module.tied_modules.embed.word_embeddings.norm.bias=Parameter containing:
1347
+ tensor([0., 0., 0., ..., 0., 0., 0.], device='cuda:1', dtype=torch.bfloat16,
1348
+ requires_grad=True)
1349
+ ```
1350
+
1351
+
1352
+ ## main-5
1353
+
1354
+ trying a new baseline with rampup starting from 192
1355
+
1356
+
1357
+
1358
+ ## main-6
1359
+
1360
+ trying https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/260 - comparing with main-5
1361
+
1362
+ tracks exactly main-5 - merged.
1363
+
1364
+
1365
+ ## main-7
1366
+
1367
+ Running with https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/261
1368
+
1369
+ Don't allocate the embedding LN on pp rank -1 - note that this means a different checkpoint.
1370
+
1371
+ still spikes on restart
1372
+
1373
+
1374
+ ## main-no-emb-norm
1375
+
1376
+ disable `--embed-layernorm` completely, check if spikes on restart
1377
+
1378
+ no spikes on restart
1379
+
1380
+ ## main-8
1381
+
1382
+ 1. test https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/262
1383
+
1384
+ 2. At iteration 1438 switched to deepspeed@ab61edb02a137d91b61bd416b4e8d3eb287b0eba of olruwase/bf16-updates - let's see if it still tracks the previous runs - yes it does.
1385
+
1386
+ So the cause of the restart spike was this: the framework was putting the `LayerNorm` I added for the embedding layer into the wrong param group [here](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/dd06ea32e014d8db6cdaf5e6839071d6523ca83c/megatron/optimizer/__init__.py#L31-L45).
1387
+
1388
+ It should have been in `no_weight_decay_params` but ended up in `weight_decay_params`, because in this module `LayerNorm` is an alias for `MixedFusedLayerNorm`, so `isinstance(module_, LayerNorm)` was `False` for the plain `torch.nn.LayerNorm` instance.
1389
+
1390
+ So if we want to use `torch.nn.LayerNorm` we have to change the code above to additionally check `isinstance(module_, torch.nn.LayerNorm)`, as sketched below.
1391
+
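+ A hedged sketch of the fix (the names are adapted from `megatron/optimizer/__init__.py`, not the verbatim code): since `LayerNorm` imported from `megatron.model` aliases apex's `MixedFusedLayerNorm`, a plain `torch.nn.LayerNorm` used for the embedding has to be matched explicitly.
+
+ ```
+ import torch
+ from megatron.model import LayerNorm  # alias of apex MixedFusedLayerNorm
+
+ def get_params_for_weight_decay_optimization(modules):
+     weight_decay_params = {'params': []}
+     no_weight_decay_params = {'params': [], 'weight_decay': 0.0}
+     for module in modules:
+         for module_ in module.modules():
+             if isinstance(module_, (LayerNorm, torch.nn.LayerNorm)):
+                 # all LayerNorm params (weight and bias) get no weight decay
+                 no_weight_decay_params['params'].extend(
+                     p for p in module_._parameters.values() if p is not None)
+             else:
+                 weight_decay_params['params'].extend(
+                     p for n, p in module_._parameters.items()
+                     if p is not None and n != 'bias')
+                 no_weight_decay_params['params'].extend(
+                     p for n, p in module_._parameters.items()
+                     if p is not None and n == 'bias')
+     return weight_decay_params, no_weight_decay_params
+ ```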
1392
+ ## main-9
1393
+
1394
+ Re-running with deepspeed@77b649d160c1cd86f33415e2a7deab50c45fba16 of olruwase/bf16-updates, which fixed the tied-embedding desynchronization bug caused by grad clipping not running on the last pp rank for tied embeddings.
train/tr11-176B-ml/finetune.md ADDED
@@ -0,0 +1,15 @@
1
+ # Finetuning 176B
2
+
3
+ # Finetuning 176B
4
+ For now, there are 2 main bottlenecks that are responsible of not giving 100% logits match between HF model and Megatron model
5
+
6
+ ## Diverging bottlenecks
7
+
8
+ ### TP merging strategy
9
+
10
+ See [this issue](https://github.com/pytorch/pytorch/issues/76232). When merging TP ranks the logits exactness is lost. The idea would be to finetune the 176B model with TP=1.
11
+
12
+ ### Use `torch_softmax` instead of `fused_softmax`
13
+
14
+ `fused_softmax` and `torch_softmax` do not give the same results (i.e., `torch.testing.assert_allclose(atol=0.0, rtol=0.0)` does not pass); see the illustration below. The main model could be finetuned with `torch_softmax`.
15
+ See [this line](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/cb48bd2c8bc182fb9872f127ef7c2267fbf9cc2e/megatron/model/fused_softmax.py#L204)
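+
+ A hedged illustration of the kind of bit-exactness test meant above (not the actual Megatron fused kernel - just two numerically different softmax paths, which in general do not match at `atol=0.0, rtol=0.0`):
+
+ ```
+ import torch
+
+ torch.manual_seed(0)
+ scores = torch.randn(2, 4, 128, 128)
+ a = torch.softmax(scores, dim=-1)                    # fp32 path
+ b = torch.softmax(scores.double(), dim=-1).float()   # fp64 path, cast back to fp32
+ # expected to raise for typical inputs: the two paths are not bit-identical
+ torch.testing.assert_allclose(a, b, atol=0.0, rtol=0.0)
+ ```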
train/tr11-176B-ml/smaller_models/tr11b-1B3-ml.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11b-1B3-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ #source $six_ALL_CCFRWORK/start-py38-pt110
16
+ #source $six_ALL_CCFRWORK/start-py38-pt111
17
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
18
+
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=main
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11b-1B3-ml
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11b-1B3-ml-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+
30
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
31
+ cd $MEGATRON_DEEPSPEED_REPO
32
+
33
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
34
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-1B3.txt
35
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-1B3.txt
36
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
37
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
38
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
40
+
41
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
42
+
43
+ # defining the right environment variables
44
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
45
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
46
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
47
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
48
+ export HF_DATASETS_OFFLINE=1
49
+ export TRANSFORMERS_OFFLINE=1
50
+
51
+ # testing for potential faulty nodes
52
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
53
+
54
+ # so processes know who to talk to
55
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
56
+ MASTER_PORT=6000
57
+
58
+ GPUS_PER_NODE=4
59
+ NNODES=$SLURM_NNODES
60
+
61
+ PP_SIZE=2
62
+ TP_SIZE=2
63
+
64
+ MICRO_BATCH_SIZE=1
65
+ GLOBAL_BATCH_SIZE=512
66
+
67
+ NLAYERS=24
68
+ NHIDDEN=2048
69
+ NHEADS=16
70
+ SEQ_LEN=2048
71
+
72
+ SAVE_INTERVAL=250
73
+
74
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
75
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
76
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
77
+
78
+
79
+ OPTIMIZER_ARGS=" \
80
+ --optimizer adam \
81
+ --adam-beta1 0.9 \
82
+ --adam-beta2 0.95 \
83
+ --adam-eps 1e-8 \
84
+ --lr 2e-4 \
85
+ --min-lr 1e-5 \
86
+ --lr-decay-style cosine \
87
+ --lr-decay-samples $LR_DECAY_SAMPLES \
88
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
89
+ --clip-grad 1.0 \
90
+ --weight-decay 1e-1 \
91
+ "
92
+ # for 20h 1190, for 100h 5990
93
+ # --exit-duration-in-mins 1190 \
94
+ EXIT_OPTS=" \
95
+ --exit-duration-in-mins 5990 \
96
+ "
97
+
98
+ GPT_ARGS=" \
99
+ --pp-partition-method 'type:transformer|embedding' \
100
+ --num-layers $NLAYERS \
101
+ --hidden-size $NHIDDEN \
102
+ --num-attention-heads $NHEADS \
103
+ --seq-length $SEQ_LEN \
104
+ --max-position-embeddings $SEQ_LEN \
105
+ --micro-batch-size $MICRO_BATCH_SIZE \
106
+ --rampup-batch-size 192 16 9_765_625 \
107
+ --global-batch-size $GLOBAL_BATCH_SIZE \
108
+ --train-samples $TRAIN_SAMPLES \
109
+ --tokenizer-type PretrainedFromHF \
110
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
111
+ --init-method-std 0.0048 \
112
+ --embed-layernorm \
113
+ --fp16 \
114
+ --seed 42 \
115
+ --position-embedding-type alibi \
116
+ --checkpoint-activations \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11c-2B5-ml-continuation.slurm ADDED
@@ -0,0 +1,204 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11c-2B5-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=32
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ source /gpfsscratch/rech/six/commun/commun/experiments/muennighoff/muennighoffsmallmodels
17
+
18
+ echo "START TIME: $(date)"
19
+
20
+ variant=main
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11c-2B5-ml-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
26
+ LOGS_PATH=$REPO_PATH/logs/$variant
27
+ mkdir -p $LOGS_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
33
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-2B5.txt
34
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-2B5.txt
35
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
36
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
37
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
38
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
39
+
40
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
41
+
42
+ # defining the right environment variables
43
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
44
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
45
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
46
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
47
+ export HF_DATASETS_OFFLINE=1
48
+ export TRANSFORMERS_OFFLINE=1
49
+
50
+ # testing for potential faulty nodes
51
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
52
+
53
+ # so processes know who to talk to
54
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
55
+ MASTER_PORT=6000
56
+
57
+ GPUS_PER_NODE=4
58
+ NNODES=$SLURM_NNODES
59
+
60
+ PP_SIZE=2
61
+ TP_SIZE=1
62
+
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=512
65
+
66
+ NLAYERS=30
67
+ NHIDDEN=2560
68
+ NHEADS=32
69
+ SEQ_LEN=2048
70
+
71
+ SAVE_INTERVAL=250
72
+
73
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
74
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
75
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
76
+
77
+
78
+ OPTIMIZER_ARGS=" \
79
+ --optimizer adam \
80
+ --adam-beta1 0.9 \
81
+ --adam-beta2 0.95 \
82
+ --adam-eps 1e-8 \
83
+ --lr 1.6e-4 \
84
+ --min-lr 1e-5 \
85
+ --lr-decay-style cosine \
86
+ --lr-decay-samples $LR_DECAY_SAMPLES \
87
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
88
+ --clip-grad 1.0 \
89
+ --weight-decay 1e-1 \
90
+ "
91
+ # for 20h 1190, for 100h 5990
92
+ # --exit-duration-in-mins 1190 \
93
+ EXIT_OPTS=" \
94
+ --exit-duration-in-mins 5990 \
95
+ "
96
+
97
+ GPT_ARGS=" \
98
+ --pp-partition-method 'type:transformer|embedding' \
99
+ --num-layers $NLAYERS \
100
+ --hidden-size $NHIDDEN \
101
+ --num-attention-heads $NHEADS \
102
+ --seq-length $SEQ_LEN \
103
+ --max-position-embeddings $SEQ_LEN \
104
+ --micro-batch-size $MICRO_BATCH_SIZE \
105
+ --rampup-batch-size 192 32 9_765_625 \
106
+ --global-batch-size $GLOBAL_BATCH_SIZE \
107
+ --train-samples $TRAIN_SAMPLES \
108
+ --tokenizer-type PretrainedFromHF \
109
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
110
+ --init-method-std 0.0048 \
111
+ --embed-layernorm \
112
+ --fp16 \
113
+ --seed 42 \
114
+ --position-embedding-type alibi \
115
+ --checkpoint-activations \
116
+ --abort-on-unmet-fused-kernel-constraints \
117
+ --pad-vocab-size-to 250880 \
118
+ $OPTIMIZER_ARGS \
119
+ $EXIT_OPTS \
120
+ "
121
+
122
+ # TODO: decide on efficient eval-interval + eval-iters
123
+
124
+ OUTPUT_ARGS=" \
125
+ --log-interval 1 \
126
+ --save-interval $SAVE_INTERVAL \
127
+ --eval-interval 1000 \
128
+ --eval-iters 1 \
129
+ --tensorboard-dir $TENSORBOARD_PATH \
130
+ --tensorboard-queue-size 5 \
131
+ --log-timers-to-tensorboard \
132
+ --log-batch-size-to-tensorboard \
133
+ --log-validation-ppl-to-tensorboard \
134
+ "
135
+
136
+ ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
137
+
138
+ config_json="./ds_config.$SLURM_JOBID.json"
139
+
140
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
141
+ cat <<EOT > $config_json
142
+ {
143
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
144
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
145
+ "gradient_clipping": 1.0,
146
+ "zero_optimization": {
147
+ "stage": $ZERO_STAGE
148
+ },
149
+ "fp16": {
150
+ "enabled": true,
151
+ "loss_scale": 0,
152
+ "loss_scale_window": 500,
153
+ "hysteresis": 2,
154
+ "min_loss_scale": 1,
155
+ "initial_scale_power": 12
156
+ },
157
+ "steps_per_print": 2000,
158
+ "wall_clock_breakdown": false
159
+ }
160
+ EOT
161
+
162
+
163
+ DEEPSPEED_ARGS=" \
164
+ --deepspeed \
165
+ --deepspeed_config ${config_json} \
166
+ --zero-stage ${ZERO_STAGE} \
167
+ --deepspeed-activation-checkpointing \
168
+ "
169
+
170
+ export LAUNCHER="python -u -m torch.distributed.run \
171
+ --nproc_per_node $GPUS_PER_NODE \
172
+ --nnodes $NNODES \
173
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
174
+ --rdzv_backend c10d \
175
+ --max_restarts 0 \
176
+ --tee 3 \
177
+ "
178
+
179
+ export CMD=" \
180
+ `pwd`/pretrain_gpt.py \
181
+ --tensor-model-parallel-size $TP_SIZE \
182
+ --pipeline-model-parallel-size $PP_SIZE \
183
+ $GPT_ARGS \
184
+ $OUTPUT_ARGS \
185
+ --save $CHECKPOINT_PATH \
186
+ --load $CHECKPOINT_PATH \
187
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
188
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
189
+ --data-impl mmap \
190
+ --distributed-backend nccl \
191
+ $DEEPSPEED_ARGS \
192
+ "
193
+
194
+ echo $CMD
195
+
196
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
197
+ export CUDA_LAUNCH_BLOCKING=1
198
+
199
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
200
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
201
+
202
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
203
+
204
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11c-2B5-ml.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11c-2B5-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=32
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --account=six@v100
12
+
13
+ set -x -e
14
+
15
+ #source $six_ALL_CCFRWORK/start-py38-pt110
16
+ #source $six_ALL_CCFRWORK/start-py38-pt111
17
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
18
+
19
+ echo "START TIME: $(date)"
20
+
21
+ variant=main
22
+
23
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml
24
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
25
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11c-2B5-ml-logs
26
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
27
+ LOGS_PATH=$REPO_PATH/logs/$variant
28
+ mkdir -p $LOGS_PATH
29
+
30
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
31
+ cd $MEGATRON_DEEPSPEED_REPO
32
+
33
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
34
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-2B5.txt
35
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-2B5.txt
36
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
37
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
38
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
40
+
41
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
42
+
43
+ # defining the right environment variables
44
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
45
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
46
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
47
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
48
+ export HF_DATASETS_OFFLINE=1
49
+ export TRANSFORMERS_OFFLINE=1
50
+
51
+ # testing for potential faulty nodes
52
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
53
+
54
+ # so processes know who to talk to
55
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
56
+ MASTER_PORT=6000
57
+
58
+ GPUS_PER_NODE=4
59
+ NNODES=$SLURM_NNODES
60
+
61
+ PP_SIZE=4
62
+ TP_SIZE=4
63
+
64
+ MICRO_BATCH_SIZE=1
65
+ GLOBAL_BATCH_SIZE=512
66
+
67
+ NLAYERS=30
68
+ NHIDDEN=2560
69
+ NHEADS=32
70
+ SEQ_LEN=2048
71
+
72
+ SAVE_INTERVAL=250
73
+
74
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
75
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
76
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
77
+
78
+
79
+ OPTIMIZER_ARGS=" \
80
+ --optimizer adam \
81
+ --adam-beta1 0.9 \
82
+ --adam-beta2 0.95 \
83
+ --adam-eps 1e-8 \
84
+ --lr 1.6e-4 \
85
+ --min-lr 1e-5 \
86
+ --lr-decay-style cosine \
87
+ --lr-decay-samples $LR_DECAY_SAMPLES \
88
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
89
+ --clip-grad 1.0 \
90
+ --weight-decay 1e-1 \
91
+ "
92
+ # for 20h 1190, for 100h 5990
93
+ # --exit-duration-in-mins 1190 \
94
+ EXIT_OPTS=" \
95
+ --exit-duration-in-mins 5990 \
96
+ "
97
+
98
+ GPT_ARGS=" \
99
+ --pp-partition-method 'type:transformer|embedding' \
100
+ --num-layers $NLAYERS \
101
+ --hidden-size $NHIDDEN \
102
+ --num-attention-heads $NHEADS \
103
+ --seq-length $SEQ_LEN \
104
+ --max-position-embeddings $SEQ_LEN \
105
+ --micro-batch-size $MICRO_BATCH_SIZE \
106
+ --rampup-batch-size 192 32 9_765_625 \
107
+ --global-batch-size $GLOBAL_BATCH_SIZE \
108
+ --train-samples $TRAIN_SAMPLES \
109
+ --tokenizer-type PretrainedFromHF \
110
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
111
+ --init-method-std 0.0048 \
112
+ --embed-layernorm \
113
+ --fp16 \
114
+ --seed 42 \
115
+ --position-embedding-type alibi \
116
+ --checkpoint-activations \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11d-760M-ml-continuation.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11d-760M-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ #source $six_ALL_CCFRWORK/start-py38-pt110
17
+ #source $six_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11d-760M-ml
25
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
26
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11d-760M-ml-logs
27
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
28
+ LOGS_PATH=$REPO_PATH/logs/$variant
29
+ mkdir -p $LOGS_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
35
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-760M.txt
36
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-760M.txt
37
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
38
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
40
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
41
+
42
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
43
+
44
+ # defining the right environment variables
45
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
46
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
47
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
48
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
49
+ export HF_DATASETS_OFFLINE=1
50
+ export TRANSFORMERS_OFFLINE=1
51
+
52
+ # testing for potential faulty nodes
53
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
54
+
55
+ # so processes know who to talk to
56
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
57
+ MASTER_PORT=6000
58
+
59
+ GPUS_PER_NODE=4
60
+ NNODES=$SLURM_NNODES
61
+
62
+ PP_SIZE=2
63
+ TP_SIZE=1
64
+
65
+ MICRO_BATCH_SIZE=1
66
+ GLOBAL_BATCH_SIZE=256
67
+
68
+ NLAYERS=24
69
+ NHIDDEN=1536
70
+ NHEADS=16
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
76
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
77
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
78
+
79
+
80
+ OPTIMIZER_ARGS=" \
81
+ --optimizer adam \
82
+ --adam-beta1 0.9 \
83
+ --adam-beta2 0.95 \
84
+ --adam-eps 1e-8 \
85
+ --lr 2.5e-4 \
86
+ --min-lr 1e-5 \
87
+ --lr-decay-style cosine \
88
+ --lr-decay-samples $LR_DECAY_SAMPLES \
89
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
90
+ --clip-grad 1.0 \
91
+ --weight-decay 1e-1 \
92
+ "
93
+ # for 20h 1190, for 100h 5990
94
+ # --exit-duration-in-mins 1190 \
95
+ EXIT_OPTS=" \
96
+ --exit-duration-in-mins 5990 \
97
+ "
98
+
99
+ GPT_ARGS=" \
100
+ --pp-partition-method 'type:transformer|embedding' \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --rampup-batch-size 192 16 9_765_625 \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --init-method-std 0.0048 \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=1 # this fp16 run uses ZeRO stage 1; the "bf16 must use z0" caveat applies only to the bf16 setup
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11d-760M-ml.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11d-760M-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ #source $six_ALL_CCFRWORK/start-py38-pt110
17
+ #source $six_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11d-760M-ml
25
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
26
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11d-760M-ml-logs
27
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
28
+ LOGS_PATH=$REPO_PATH/logs/$variant
29
+ mkdir -p $LOGS_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
35
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-760M.txt
36
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-760M.txt
37
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
38
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
40
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
41
+
42
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
43
+
44
+ # defining the right environment variables
45
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
46
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
47
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
48
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
49
+ export HF_DATASETS_OFFLINE=1
50
+ export TRANSFORMERS_OFFLINE=1
51
+
52
+ # testing for potential faulty nodes
53
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
54
+
55
+ # so processes know who to talk to
56
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
57
+ MASTER_PORT=6000
58
+
59
+ GPUS_PER_NODE=4
60
+ NNODES=$SLURM_NNODES
61
+
62
+ PP_SIZE=2
63
+ TP_SIZE=1
64
+
65
+ MICRO_BATCH_SIZE=1
66
+ GLOBAL_BATCH_SIZE=256
67
+
68
+ NLAYERS=24
69
+ NHIDDEN=1536
70
+ NHEADS=16
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
76
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
77
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
78
+
79
+
80
+ OPTIMIZER_ARGS=" \
81
+ --optimizer adam \
82
+ --adam-beta1 0.9 \
83
+ --adam-beta2 0.95 \
84
+ --adam-eps 1e-8 \
85
+ --lr 2.5e-4 \
86
+ --min-lr 1e-5 \
87
+ --lr-decay-style cosine \
88
+ --lr-decay-samples $LR_DECAY_SAMPLES \
89
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
90
+ --clip-grad 1.0 \
91
+ --weight-decay 1e-1 \
92
+ "
93
+ # for 20h 1190, for 100h 5990
94
+ # --exit-duration-in-mins 1190 \
95
+ EXIT_OPTS=" \
96
+ --exit-duration-in-mins 5990 \
97
+ "
98
+
99
+ GPT_ARGS=" \
100
+ --pp-partition-method 'type:transformer|embedding' \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --rampup-batch-size 192 16 9_765_625 \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --init-method-std 0.0048 \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=0 # note: this run uses fp16, so the "bf16 must use z0" caveat does not apply; ZeRO is simply disabled here
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11e-350M-ml-continuation.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11e-350M-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ #source $six_ALL_CCFRWORK/start-py38-pt110
17
+ #source $six_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml
25
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
26
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11e-350M-ml-logs
27
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
28
+ LOGS_PATH=$REPO_PATH/logs/$variant
29
+ mkdir -p $LOGS_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
35
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-350M.txt
36
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-350M.txt
37
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
38
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
40
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
41
+
42
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
43
+
44
+ # defining the right environment variables
45
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
46
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
47
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
48
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
49
+ export HF_DATASETS_OFFLINE=1
50
+ export TRANSFORMERS_OFFLINE=1
51
+
52
+ # testing for potential faulty nodes
53
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
54
+
55
+ # so processes know who to talk to
56
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
57
+ MASTER_PORT=6000
58
+
59
+ GPUS_PER_NODE=4
60
+ NNODES=$SLURM_NNODES
61
+
62
+ PP_SIZE=1
63
+ TP_SIZE=1
64
+
65
+ MICRO_BATCH_SIZE=1
66
+ GLOBAL_BATCH_SIZE=256
67
+
68
+ NLAYERS=24
69
+ NHIDDEN=1024
70
+ NHEADS=16
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
76
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
77
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
78
+
79
+
80
+ OPTIMIZER_ARGS=" \
81
+ --optimizer adam \
82
+ --adam-beta1 0.9 \
83
+ --adam-beta2 0.95 \
84
+ --adam-eps 1e-8 \
85
+ --lr 3.0e-4 \
86
+ --min-lr 1e-5 \
87
+ --lr-decay-style cosine \
88
+ --lr-decay-samples $LR_DECAY_SAMPLES \
89
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
90
+ --clip-grad 1.0 \
91
+ --weight-decay 1e-1 \
92
+ "
93
+ # for 20h 1190, for 100h 5990
94
+ # --exit-duration-in-mins 1190 \
95
+ EXIT_OPTS=" \
96
+ --exit-duration-in-mins 5990 \
97
+ "
98
+
99
+ GPT_ARGS=" \
100
+ --pp-partition-method 'type:transformer|embedding' \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --rampup-batch-size 192 32 9_765_625 \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --init-method-std 0.0048 \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=1 # this fp16 run uses ZeRO stage 1; the "bf16 must use z0" caveat applies only to the bf16 setup
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11e-350M-ml.slurm ADDED
@@ -0,0 +1,205 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11e-350M-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=8
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ #source $six_ALL_CCFRWORK/start-py38-pt110
17
+ #source $six_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml
25
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
26
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11e-350M-ml-logs
27
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
28
+ LOGS_PATH=$REPO_PATH/logs/$variant
29
+ mkdir -p $LOGS_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
35
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-350M.txt
36
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-350M.txt
37
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
38
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
40
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
41
+
42
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
43
+
44
+ # defining the right environment variables
45
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
46
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
47
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
48
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
49
+ export HF_DATASETS_OFFLINE=1
50
+ export TRANSFORMERS_OFFLINE=1
51
+
52
+ # testing for potential faulty nodes
53
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
54
+
55
+ # so processes know who to talk to
56
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
57
+ MASTER_PORT=6000
58
+
59
+ GPUS_PER_NODE=4
60
+ NNODES=$SLURM_NNODES
61
+
62
+ PP_SIZE=1
63
+ TP_SIZE=1
64
+
65
+ MICRO_BATCH_SIZE=1
66
+ GLOBAL_BATCH_SIZE=256
67
+
68
+ NLAYERS=24
69
+ NHIDDEN=1024
70
+ NHEADS=16
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
76
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
77
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
78
+
79
+
80
+ OPTIMIZER_ARGS=" \
81
+ --optimizer adam \
82
+ --adam-beta1 0.9 \
83
+ --adam-beta2 0.95 \
84
+ --adam-eps 1e-8 \
85
+ --lr 3.0e-4 \
86
+ --min-lr 1e-5 \
87
+ --lr-decay-style cosine \
88
+ --lr-decay-samples $LR_DECAY_SAMPLES \
89
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
90
+ --clip-grad 1.0 \
91
+ --weight-decay 1e-1 \
92
+ "
93
+ # for 20h 1190, for 100h 5990
94
+ # --exit-duration-in-mins 1190 \
95
+ EXIT_OPTS=" \
96
+ --exit-duration-in-mins 5990 \
97
+ "
98
+
99
+ GPT_ARGS=" \
100
+ --pp-partition-method 'type:transformer|embedding' \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --rampup-batch-size 192 32 9_765_625 \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --init-method-std 0.0048 \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --abort-on-unmet-fused-kernel-constraints \
118
+ --pad-vocab-size-to 250880 \
119
+ $OPTIMIZER_ARGS \
120
+ $EXIT_OPTS \
121
+ "
122
+
123
+ # TODO: decide on efficient eval-interval + eval-iters
124
+
125
+ OUTPUT_ARGS=" \
126
+ --log-interval 1 \
127
+ --save-interval $SAVE_INTERVAL \
128
+ --eval-interval 1000 \
129
+ --eval-iters 1 \
130
+ --tensorboard-dir $TENSORBOARD_PATH \
131
+ --tensorboard-queue-size 5 \
132
+ --log-timers-to-tensorboard \
133
+ --log-batch-size-to-tensorboard \
134
+ --log-validation-ppl-to-tensorboard \
135
+ "
136
+
137
+ ZERO_STAGE=0 # note: this run uses fp16, so the "bf16 must use z0" caveat does not apply; ZeRO is simply disabled here
138
+
139
+ config_json="./ds_config.$SLURM_JOBID.json"
140
+
141
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
142
+ cat <<EOT > $config_json
143
+ {
144
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
145
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
146
+ "gradient_clipping": 1.0,
147
+ "zero_optimization": {
148
+ "stage": $ZERO_STAGE
149
+ },
150
+ "fp16": {
151
+ "enabled": true,
152
+ "loss_scale": 0,
153
+ "loss_scale_window": 500,
154
+ "hysteresis": 2,
155
+ "min_loss_scale": 1,
156
+ "initial_scale_power": 12
157
+ },
158
+ "steps_per_print": 2000,
159
+ "wall_clock_breakdown": false
160
+ }
161
+ EOT
162
+
163
+
164
+ DEEPSPEED_ARGS=" \
165
+ --deepspeed \
166
+ --deepspeed_config ${config_json} \
167
+ --zero-stage ${ZERO_STAGE} \
168
+ --deepspeed-activation-checkpointing \
169
+ "
170
+
171
+ export LAUNCHER="python -u -m torch.distributed.run \
172
+ --nproc_per_node $GPUS_PER_NODE \
173
+ --nnodes $NNODES \
174
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
175
+ --rdzv_backend c10d \
176
+ --max_restarts 0 \
177
+ --tee 3 \
178
+ "
179
+
180
+ export CMD=" \
181
+ `pwd`/pretrain_gpt.py \
182
+ --tensor-model-parallel-size $TP_SIZE \
183
+ --pipeline-model-parallel-size $PP_SIZE \
184
+ $GPT_ARGS \
185
+ $OUTPUT_ARGS \
186
+ --save $CHECKPOINT_PATH \
187
+ --load $CHECKPOINT_PATH \
188
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
189
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
190
+ --data-impl mmap \
191
+ --distributed-backend nccl \
192
+ $DEEPSPEED_ARGS \
193
+ "
194
+
195
+ echo $CMD
196
+
197
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
198
+ export CUDA_LAUNCH_BLOCKING=1
199
+
200
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
201
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
202
+
203
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
204
+
205
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11f-6B3-ml-continuation.slurm ADDED
@@ -0,0 +1,204 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11f-6B3-ml
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --nodes=16
6
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
7
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
8
+ #SBATCH --hint=nomultithread # we get physical cores not logical
9
+ #SBATCH --gres=gpu:8 # number of gpus
10
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+ set -x -e
15
+
16
+ source /gpfsscratch/rech/six/commun/commun/experiments/muennighoff/muennighoffsmallmodels
17
+
18
+ echo "START TIME: $(date)"
19
+
20
+ variant=main
21
+
22
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml
23
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
24
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11f-6B3-ml-logs
25
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
26
+ LOGS_PATH=$REPO_PATH/logs/$variant
27
+ mkdir -p $LOGS_PATH
28
+
29
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
30
+ cd $MEGATRON_DEEPSPEED_REPO
31
+
32
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
33
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-6B3.txt
34
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-6B3.txt
35
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
36
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
37
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
38
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
39
+
40
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
41
+
42
+ # defining the right environment variables
43
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
44
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
45
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
46
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
47
+ export HF_DATASETS_OFFLINE=1
48
+ export TRANSFORMERS_OFFLINE=1
49
+
50
+ # testing for potential faulty nodes
51
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
52
+
53
+ # so processes know who to talk to
54
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
55
+ MASTER_PORT=6000
56
+
57
+ GPUS_PER_NODE=8
58
+ NNODES=$SLURM_NNODES
59
+
60
+ PP_SIZE=1
61
+ TP_SIZE=1
62
+
63
+ MICRO_BATCH_SIZE=1
64
+ GLOBAL_BATCH_SIZE=512
65
+
66
+ NLAYERS=30
67
+ NHIDDEN=4096
68
+ NHEADS=32
69
+ SEQ_LEN=2048
70
+
71
+ SAVE_INTERVAL=1
72
+
73
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
74
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
75
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
76
+
77
+
78
+ OPTIMIZER_ARGS=" \
79
+ --optimizer adam \
80
+ --adam-beta1 0.9 \
81
+ --adam-beta2 0.95 \
82
+ --adam-eps 1e-8 \
83
+ --lr 1.2e-4 \
84
+ --min-lr 1e-5 \
85
+ --lr-decay-style cosine \
86
+ --lr-decay-samples $LR_DECAY_SAMPLES \
87
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
88
+ --clip-grad 1.0 \
89
+ --weight-decay 1e-1 \
90
+ "
91
+ # for 20h 1190, for 100h 5990
92
+ # --exit-duration-in-mins 1190 \
93
+ EXIT_OPTS=" \
94
+ --exit-duration-in-mins 5990 \
95
+ "
96
+
97
+ GPT_ARGS=" \
98
+ --pp-partition-method 'type:transformer|embedding' \
99
+ --num-layers $NLAYERS \
100
+ --hidden-size $NHIDDEN \
101
+ --num-attention-heads $NHEADS \
102
+ --seq-length $SEQ_LEN \
103
+ --max-position-embeddings $SEQ_LEN \
104
+ --micro-batch-size $MICRO_BATCH_SIZE \
105
+ --rampup-batch-size 192 32 9_765_625 \
106
+ --global-batch-size $GLOBAL_BATCH_SIZE \
107
+ --train-samples $TRAIN_SAMPLES \
108
+ --tokenizer-type PretrainedFromHF \
109
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
110
+ --init-method-std 0.0048 \
111
+ --embed-layernorm \
112
+ --fp16 \
113
+ --seed 42 \
114
+ --position-embedding-type alibi \
115
+ --checkpoint-activations \
116
+ --abort-on-unmet-fused-kernel-constraints \
117
+ --pad-vocab-size-to 250880 \
118
+ $OPTIMIZER_ARGS \
119
+ $EXIT_OPTS \
120
+ "
121
+
122
+ # TODO: decide on efficient eval-interval + eval-iters
123
+
124
+ OUTPUT_ARGS=" \
125
+ --log-interval 1 \
126
+ --save-interval $SAVE_INTERVAL \
127
+ --eval-interval 1000 \
128
+ --eval-iters 1 \
129
+ --tensorboard-dir $TENSORBOARD_PATH \
130
+ --tensorboard-queue-size 5 \
131
+ --log-timers-to-tensorboard \
132
+ --log-batch-size-to-tensorboard \
133
+ --log-validation-ppl-to-tensorboard \
134
+ "
135
+
136
+ ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
137
+
138
+ config_json="./ds_config.$SLURM_JOBID.json"
139
+
140
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
141
+ cat <<EOT > $config_json
142
+ {
143
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
144
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
145
+ "gradient_clipping": 1.0,
146
+ "zero_optimization": {
147
+ "stage": $ZERO_STAGE
148
+ },
149
+ "fp16": {
150
+ "enabled": true,
151
+ "loss_scale": 0,
152
+ "loss_scale_window": 500,
153
+ "hysteresis": 2,
154
+ "min_loss_scale": 1,
155
+ "initial_scale_power": 12
156
+ },
157
+ "steps_per_print": 2000,
158
+ "wall_clock_breakdown": false
159
+ }
160
+ EOT
161
+
162
+
163
+ DEEPSPEED_ARGS=" \
164
+ --deepspeed \
165
+ --deepspeed_config ${config_json} \
166
+ --zero-stage ${ZERO_STAGE} \
167
+ --deepspeed-activation-checkpointing \
168
+ "
169
+
170
+ export LAUNCHER="python -u -m torch.distributed.run \
171
+ --nproc_per_node $GPUS_PER_NODE \
172
+ --nnodes $NNODES \
173
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
174
+ --rdzv_backend c10d \
175
+ --max_restarts 0 \
176
+ --tee 3 \
177
+ "
178
+
179
+ export CMD=" \
180
+ `pwd`/pretrain_gpt.py \
181
+ --tensor-model-parallel-size $TP_SIZE \
182
+ --pipeline-model-parallel-size $PP_SIZE \
183
+ $GPT_ARGS \
184
+ $OUTPUT_ARGS \
185
+ --save $CHECKPOINT_PATH \
186
+ --load $CHECKPOINT_PATH \
187
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
188
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
189
+ --data-impl mmap \
190
+ --distributed-backend nccl \
191
+ $DEEPSPEED_ARGS \
192
+ "
193
+
194
+ echo $CMD
195
+
196
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
197
+ export CUDA_LAUNCH_BLOCKING=1
198
+
199
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
200
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
201
+
202
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
203
+
204
+ echo "END TIME: $(date)"
train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm ADDED
@@ -0,0 +1,206 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11f-6B3-ml
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=32
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH -C v100-32g
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ #source $six_ALL_CCFRWORK/start-py38-pt110
17
+ #source $six_ALL_CCFRWORK/start-py38-pt111
18
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
19
+
20
+ echo "START TIME: $(date)"
21
+
22
+ variant=main
23
+
24
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml
25
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
26
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11f-6B3-ml-logs
27
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
28
+ LOGS_PATH=$REPO_PATH/logs/$variant
29
+ mkdir -p $LOGS_PATH
30
+
31
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
35
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-6B3.txt
36
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-6B3.txt
37
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
38
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
39
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
40
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
41
+
42
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
43
+
44
+ # defining the right environment variables
45
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
46
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
47
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
48
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
49
+ export HF_DATASETS_OFFLINE=1
50
+ export TRANSFORMERS_OFFLINE=1
51
+
52
+ # testing for potential faulty nodes
53
+ # srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
54
+
55
+ # so processes know who to talk to
56
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
57
+ MASTER_PORT=6000
58
+
59
+ GPUS_PER_NODE=4
60
+ NNODES=$SLURM_NNODES
61
+
62
+ PP_SIZE=4
63
+ TP_SIZE=4
64
+
65
+ MICRO_BATCH_SIZE=1
66
+ GLOBAL_BATCH_SIZE=512
67
+
68
+ NLAYERS=30
69
+ NHIDDEN=4096
70
+ NHEADS=32
71
+ SEQ_LEN=2048
72
+
73
+ SAVE_INTERVAL=250
74
+
75
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
76
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
77
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
78
+
79
+
80
+ OPTIMIZER_ARGS=" \
81
+ --optimizer adam \
82
+ --adam-beta1 0.9 \
83
+ --adam-beta2 0.95 \
84
+ --adam-eps 1e-8 \
85
+ --lr 1.2e-4 \
86
+ --min-lr 1e-5 \
87
+ --lr-decay-style cosine \
88
+ --lr-decay-samples $LR_DECAY_SAMPLES \
89
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
90
+ --clip-grad 1.0 \
91
+ --weight-decay 1e-1 \
92
+ "
93
+ # for 20h 1190, for 100h 5990
94
+ # --exit-duration-in-mins 1190 \
95
+ EXIT_OPTS=" \
96
+ --exit-duration-in-mins 5990 \
97
+ "
98
+
99
+ GPT_ARGS=" \
100
+ --pp-partition-method 'type:transformer|embedding' \
101
+ --num-layers $NLAYERS \
102
+ --hidden-size $NHIDDEN \
103
+ --num-attention-heads $NHEADS \
104
+ --seq-length $SEQ_LEN \
105
+ --max-position-embeddings $SEQ_LEN \
106
+ --micro-batch-size $MICRO_BATCH_SIZE \
107
+ --rampup-batch-size 192 32 9_765_625 \
108
+ --global-batch-size $GLOBAL_BATCH_SIZE \
109
+ --train-samples $TRAIN_SAMPLES \
110
+ --tokenizer-type PretrainedFromHF \
111
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
112
+ --init-method-std 0.0048 \
113
+ --embed-layernorm \
114
+ --fp16 \
115
+ --seed 42 \
116
+ --position-embedding-type alibi \
117
+ --checkpoint-activations \
118
+ --abort-on-unmet-fused-kernel-constraints \
119
+ --pad-vocab-size-to 250880 \
120
+ $OPTIMIZER_ARGS \
121
+ $EXIT_OPTS \
122
+ "
123
+
124
+ # TODO: decide on efficient eval-interval + eval-iters
125
+
126
+ OUTPUT_ARGS=" \
127
+ --log-interval 1 \
128
+ --save-interval $SAVE_INTERVAL \
129
+ --eval-interval 1000 \
130
+ --eval-iters 1 \
131
+ --tensorboard-dir $TENSORBOARD_PATH \
132
+ --tensorboard-queue-size 5 \
133
+ --log-timers-to-tensorboard \
134
+ --log-batch-size-to-tensorboard \
135
+ --log-validation-ppl-to-tensorboard \
136
+ "
137
+
138
+ ZERO_STAGE=0 # note: this run uses fp16, so the "bf16 must use z0" caveat does not apply; ZeRO is simply disabled here
139
+
140
+ config_json="./ds_config.$SLURM_JOBID.json"
141
+
142
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
143
+ cat <<EOT > $config_json
144
+ {
145
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
146
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
147
+ "gradient_clipping": 1.0,
148
+ "zero_optimization": {
149
+ "stage": $ZERO_STAGE
150
+ },
151
+ "fp16": {
152
+ "enabled": true,
153
+ "loss_scale": 0,
154
+ "loss_scale_window": 500,
155
+ "hysteresis": 2,
156
+ "min_loss_scale": 1,
157
+ "initial_scale_power": 12
158
+ },
159
+ "steps_per_print": 2000,
160
+ "wall_clock_breakdown": false
161
+ }
162
+ EOT
163
+
164
+
165
+ DEEPSPEED_ARGS=" \
166
+ --deepspeed \
167
+ --deepspeed_config ${config_json} \
168
+ --zero-stage ${ZERO_STAGE} \
169
+ --deepspeed-activation-checkpointing \
170
+ "
171
+
172
+ export LAUNCHER="python -u -m torch.distributed.run \
173
+ --nproc_per_node $GPUS_PER_NODE \
174
+ --nnodes $NNODES \
175
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
176
+ --rdzv_backend c10d \
177
+ --max_restarts 0 \
178
+ --tee 3 \
179
+ "
180
+
181
+ export CMD=" \
182
+ `pwd`/pretrain_gpt.py \
183
+ --tensor-model-parallel-size $TP_SIZE \
184
+ --pipeline-model-parallel-size $PP_SIZE \
185
+ $GPT_ARGS \
186
+ $OUTPUT_ARGS \
187
+ --save $CHECKPOINT_PATH \
188
+ --load $CHECKPOINT_PATH \
189
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
190
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
191
+ --data-impl mmap \
192
+ --distributed-backend nccl \
193
+ $DEEPSPEED_ARGS \
194
+ "
195
+
196
+ echo $CMD
197
+
198
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
199
+ export CUDA_LAUNCH_BLOCKING=1
200
+
201
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
202
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
203
+
204
+ clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
205
+
206
+ echo "END TIME: $(date)"
train/tr11-176B-ml/tr11-176B-ml.slurm ADDED
@@ -0,0 +1,221 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr11-176B-ml
3
+ #SBATCH --partition=gpu_p5
4
+ #SBATCH --constraint=a100
5
+ #SBATCH --reservation=hug
6
+ #SBATCH --qos=qos_gpu-gc # up to 100h
7
+ #SBATCH --nodes=24
8
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
9
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
10
+ #SBATCH --hint=nomultithread # we get physical cores not logical
11
+ #SBATCH --gres=gpu:8 # number of gpus
12
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
13
+ #SBATCH --output=%x-%j.out # output file name
14
+ #SBATCH --account=six@a100
15
+
16
+ set -x -e
17
+
18
+ #source $six_ALL_CCFRWORK/start-py38-pt110
19
+ #source $six_ALL_CCFRWORK/start-py38-pt111
20
+ source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml
21
+
22
+ echo "START TIME: $(date)"
23
+
24
+ variant=main
25
+
26
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml
27
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant
28
+ REPO_PATH=$DATA_OUTPUT_PATH/tr11-176B-ml-logs
29
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant
30
+ LOGS_PATH=$REPO_PATH/logs/$variant
31
+ mkdir -p $LOGS_PATH
32
+
33
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed
34
+ cd $MEGATRON_DEEPSPEED_REPO
35
+
36
+ KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr11-176B-exp1
37
+
38
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience
39
+ TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits.txt
40
+ VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits.txt
41
+ CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json
42
+ LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py
43
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH
44
+ python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH
45
+
46
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
47
+
48
+ # defining the right environment variables
49
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
50
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
51
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
52
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
53
+ export HF_DATASETS_OFFLINE=1
54
+ export TRANSFORMERS_OFFLINE=1
55
+
56
+ # testing for potential faulty nodes
57
+ # srun --jobid $SLURM_JOB_ID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
58
+
59
+ # so processes know who to talk to
60
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
61
+ MASTER_PORT=6000
62
+
63
+ GPUS_PER_NODE=8
64
+ NNODES=$SLURM_NNODES
65
+
66
+ TP_SIZE=4
67
+ PP_SIZE=12
68
+
69
+ MICRO_BATCH_SIZE=2 # was MBS=1 till GBS=784
70
+ GLOBAL_BATCH_SIZE=2048 # 4.2M tokens. It is larger than the initial plan of 3.2M tokens to get higher throughput
71
+
72
+ NHIDDEN=14336
73
+ NLAYERS=70
74
+ NHEADS=112
75
+ SEQ_LEN=2048
76
+
77
+ SAVE_INTERVAL=100
78
+
79
+ TRAIN_SAMPLES=220_000_000 # 450B tokens
80
+ LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr
81
+ LR_WARMUP_SAMPLES=183_105 # 375M tokens
82
+
83
+
84
+ OPTIMIZER_ARGS=" \
85
+ --optimizer adam \
86
+ --adam-beta1 0.9 \
87
+ --adam-beta2 0.95 \
88
+ --adam-eps 1e-8 \
89
+ --lr 6e-5 \
90
+ --min-lr 6e-6 \
91
+ --lr-decay-style cosine \
92
+ --lr-decay-samples $LR_DECAY_SAMPLES \
93
+ --lr-warmup-samples $LR_WARMUP_SAMPLES \
94
+ --clip-grad 1.0 \
95
+ --weight-decay 1e-1 \
96
+ "
97
+ # for 20h 1190, for 100h 5990
98
+ # --exit-duration-in-mins 1190 \
99
+ EXIT_OPTS=" \
100
+ --exit-duration-in-mins 5990 \
101
+ "
102
+
103
+ GPT_ARGS=" \
104
+ --pp-partition-method 'type:transformer|embedding' \
105
+ --num-layers $NLAYERS \
106
+ --hidden-size $NHIDDEN \
107
+ --num-attention-heads $NHEADS \
108
+ --seq-length $SEQ_LEN \
109
+ --max-position-embeddings $SEQ_LEN \
110
+ --micro-batch-size $MICRO_BATCH_SIZE \
111
+ --rampup-batch-size 192 16 9_765_625 \
112
+ --global-batch-size $GLOBAL_BATCH_SIZE \
113
+ --train-samples $TRAIN_SAMPLES \
114
+ --tokenizer-type PretrainedFromHF \
115
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
116
+ --init-method-std 0.0048 \
117
+ --embed-layernorm \
118
+ --sync-tp-duplicated-parameters \
119
+ --bf16 \
120
+ --seed 42 \
121
+ --position-embedding-type alibi \
122
+ --checkpoint-activations \
123
+ --abort-on-unmet-fused-kernel-constraints \
124
+ --kill-switch-path $KILL_SWITCH_PATH \
125
+ --pad-vocab-size-to 250880 \
126
+ $OPTIMIZER_ARGS \
127
+ $EXIT_OPTS \
128
+ "
129
+
130
+ # TODO: decide on efficient eval-interval + eval-iters
131
+
132
+ OUTPUT_ARGS=" \
133
+ --log-interval 1 \
134
+ --save-interval $SAVE_INTERVAL \
135
+ --eval-interval 1000 \
136
+ --eval-iters 1 \
137
+ --tensorboard-dir $TENSORBOARD_PATH \
138
+ --tensorboard-queue-size 5 \
139
+ --log-timers-to-tensorboard \
140
+ --log-batch-size-to-tensorboard \
141
+ --log-validation-ppl-to-tensorboard \
142
+ "
143
+
144
+ ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent
145
+
146
+ config_json="./ds_config.$SLURM_JOB_ID.json"
147
+
148
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
149
+ cat <<EOT > $config_json
150
+ {
151
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
152
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
153
+ "gradient_clipping": 1.0,
154
+ "zero_optimization": {
155
+ "stage": $ZERO_STAGE
156
+ },
157
+ "bf16": {
158
+ "enabled": true
159
+ },
160
+ "steps_per_print": 2000,
161
+ "wall_clock_breakdown": false
162
+ }
163
+ EOT
164
+
165
+
166
+ DEEPSPEED_ARGS=" \
167
+ --deepspeed \
168
+ --deepspeed_config ${config_json} \
169
+ --zero-stage ${ZERO_STAGE} \
170
+ --deepspeed-activation-checkpointing \
171
+ "
172
+
173
+ export LAUNCHER="python -u -m torch.distributed.run \
174
+ --nproc_per_node $GPUS_PER_NODE \
175
+ --nnodes $NNODES \
176
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
177
+ --rdzv_backend c10d \
178
+ --max_restarts 0 \
179
+ --tee 3 \
180
+ "
181
+
182
+ # --universal-checkpoint \
183
+ export CMD=" \
184
+ `pwd`/pretrain_gpt.py \
185
+ --tensor-model-parallel-size $TP_SIZE \
186
+ --pipeline-model-parallel-size $PP_SIZE \
187
+ $GPT_ARGS \
188
+ $OUTPUT_ARGS \
189
+ --save $CHECKPOINT_PATH \
190
+ --load $CHECKPOINT_PATH \
191
+ --train-weighted-split-paths-path $TRAIN_DATA_PATH \
192
+ --valid-weighted-split-paths-path $VALID_DATA_PATH \
193
+ --num-workers 2 \
194
+ --valid-num-workers 0 \
195
+ --data-impl mmap \
196
+ --distributed-backend nccl \
197
+ $DEEPSPEED_ARGS \
198
+ "
199
+
200
+ echo $CMD
201
+
202
+ # do not remove or the training will hang and nodes will be lost w/o this workaround
203
+ export CUDA_LAUNCH_BLOCKING=1
204
+
205
+ # hide duplicated errors using this hack - will be properly fixed in pt-1.12
206
+ export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json
207
+
208
+ # force crashing on nccl issues like hanging broadcast
209
+ export NCCL_ASYNC_ERROR_HANDLING=1
210
+
211
+ # srun error handling:
212
+ # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
213
+ # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
214
+ SRUN_ARGS=" \
215
+ --wait=60 \
216
+ --kill-on-bad-exit=1 \
217
+ "
218
+
219
+ clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt
220
+
221
+ echo "END TIME: $(date)"
train/tr3-1B3-baseline/README.md ADDED
@@ -0,0 +1,27 @@
1
+ # Arch/Scaling baselines (tr3)
2
+
3
+ This folder contains the training scripts for the architecture and scaling baseline runs: no fancy tricks, just GPT2. Here are links to the respective tensorboards:
4
+
5
+ | Size | 1B3 | 760M | 350M | 125M |
6
+ |--------------------- |----- |------ |------ |------ |
7
+ | C4 + low warmup | [a](https://huggingface.co/bigscience/tr3-1B3-modeling-baseline-tensorboard) | [b](https://huggingface.co/bigscience/tr3b-760M-modeling-baseline-tensorboard) | [c](https://huggingface.co/bigscience/tr3c-350M-modeling-baseline-tensorboard) | |
8
+ | OSCAR + low warmup | [f](https://huggingface.co/bigscience/tr3f-1B3-diagnostic2-low-warmup-oscar-tensorboard) | | | |
9
+ | C4 + high warmup | [e](https://huggingface.co/bigscience/tr3e-1B3-diagnostic1-warmup-c4-tensorboard) | | | |
10
+ | OSCAR + high warmup | **[d (current baseline)](https://huggingface.co/bigscience/tr3d-1B3-more-warmup-tensorboard)** | [g](https://huggingface.co/bigscience/tr3g-760M-v2-tensorboard) | [h](https://huggingface.co/bigscience/tr3h-350M-v2-tensorboard) | [i](https://huggingface.co/bigscience/tr3i-125M-v2-tensorboard) |
11
+ | Pile + high warmup | [m](https://huggingface.co/bigscience/tr3m-1B3-pile-tensorboard) | [j](https://huggingface.co/bigscience/tr3j-760M-pile-tensorboard) | [k](https://huggingface.co/bigscience/tr3k-350M-pile-tensorboard) | [l](https://huggingface.co/bigscience/tr3l-125M-pile-tensorboard) |
12
+
13
+
14
+
15
+ # emb-norm
16
+
17
+ a full re-run of `tr3m-1B3-pile-tensorboard` with `--embed-layernorm` enabled
18
+
19
+ [script](tr3m-1B3-emb-norm-pile.slurm)
20
+
21
+ results:
22
+
23
+ - added `emb-norm` to https://huggingface.co/bigscience/tr3m-1B3-pile-tensorboard/tensorboard and moved the original run to `base`. Also upgraded the old TB to the new format so the new graph names match.
24
+
25
+ - full standalone repo: https://huggingface.co/bigscience/tr3m-1B3-emb-norm-pile-logs/ with logs
26
+
27
+ - last checkpoint saved in `$six_ALL_CCFRSTORE/checkpoints/tr3m-1B3-emb-norm-pile`
train/tr3-1B3-baseline/tar_experiments.slurm ADDED
@@ -0,0 +1,16 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tar_experiments # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --qos=qos_cpu-t3
9
+ #SBATCH --output=../%x-%j.out # output file name
10
+ #SBATCH --account=six@cpu
11
+ #SBATCH --partition=cpu_p1
12
+
13
+ for filename in *; do
14
+ tar -cvf "$filename.tar" "$filename"
15
+ mv "$filename.tar" $ALL_CCFRSTORE/arch_scaling_experiments_store/"$filename.tar"
16
+ done
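
Restoring an archived experiment is the reverse of the loop above; a sketch, assuming the same `arch_scaling_experiments_store` layout (the archive name is hypothetical):

```bash
archive="$ALL_CCFRSTORE/arch_scaling_experiments_store/tr3c-350M.tar"   # hypothetical name
tar -xvf "$archive"    # unpacks into ./tr3c-350M in the current directory
```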
train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-logs.slurm ADDED
@@ -0,0 +1,21 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr3-1B3-hub-sync-logs # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=prepost
10
+
11
+ echo "START TIME: $(date)"
12
+
13
+ module load git-lfs
14
+
15
+ DATA_OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full
16
+ LOGS_PATH=$DATA_OUTPUT_PATH/logs
17
+ BIG_SCIENCE_REPO_PATH=$SCRATCH/bigscience
18
+
19
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.out' -d
20
+
21
+ echo "END TIME: $(date)"
train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm ADDED
@@ -0,0 +1,22 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr3-1B3-hub-sync-tensorboard # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1 # number of nodes
5
+ #SBATCH --cpus-per-task=1 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --partition=prepost
10
+
11
+ echo "START TIME: $(date)"
12
+
13
+ module load git-lfs
14
+
15
+ DATA_OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full
16
+ TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard
17
+ BIG_SCIENCE_REPO_PATH=$SCRATCH/bigscience
18
+
19
+ $BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d
20
+
21
+ echo "END TIME: $(date)"
22
+
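Both sync jobs are ordinary batch scripts that run on the `prepost` partition; they are submitted from this folder like any other job:

```bash
sbatch tr3-1B3-modeling-baseline-hub-sync-logs.slurm
sbatch tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm
```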
train/tr3-1B3-baseline/tr3-1B3-modeling-baseline.slurm ADDED
@@ -0,0 +1,182 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-full.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ ROUND=2
18
+ TESTING=0
19
+
20
+ OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full/
21
+ MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
22
+
23
+ if [[ ${TESTING} == 1 ]]; then
24
+ # testing on 10k
25
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document
26
+ else
27
+ # production on full 304M records
28
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document
29
+
30
+ fi
31
+
32
+ source $six_ALL_CCFRWORK/start-prod
33
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
34
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
35
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
36
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
37
+ export HF_DATASETS_OFFLINE=1
38
+ export TRANSFORMERS_OFFLINE=1
39
+ cd $MEGATRON_DEEPSPEED_REPO
40
+
41
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
42
+ MASTER_PORT=6000
43
+
44
+ # adjust depending on the number of the nodes
45
+
46
+ # XXX: edit me
47
+ GPUS_PER_NODE=4
48
+ NNODES=16
49
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
50
+ TP_SIZE=4 # always fixed to the size of a single node
51
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
52
+
53
+ MICRO_BATCH_SIZE=16
54
+ GLOBAL_BATCH_SIZE=1024
55
+ TRAIN_ITER=146_484_375
56
+
57
+ NLAYERS=24
58
+ NHIDDEN=2048
59
+ NHEADS=16
60
+ FFN_HIDDEN_SIZE=8192
61
+ SEQ_LEN=2048
62
+
63
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
64
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
65
+ else echo "invalid ROUND: $ROUND"
66
+ fi
67
+
68
+ OPTIMIZER_ARGS=" \
69
+ --optimizer adam \
70
+ --adam-beta1 0.9 \
71
+ --adam-beta2 0.999 \
72
+ --adam-eps 1e-8 \
73
+ --lr 1e-4 \
74
+ --min-lr 1e-5 \
75
+ --lr-decay-style cosine \
76
+ --lr-decay-samples 126_953_125 \
77
+ --lr-warmup-samples 183_105 \
78
+ --clip-grad 1.0 \
79
+ --weight-decay 1e-1 \
80
+ "
81
+
82
+ EXIT_OPTS=" \
83
+ --exit-duration-in-mins 1190 \
84
+ "
85
+
86
+ GPT_ARGS=" \
87
+ --num-layers $NLAYERS \
88
+ --hidden-size $NHIDDEN \
89
+ --num-attention-heads $NHEADS \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_ITER \
96
+ --tokenizer-type PretrainedFromHF \
97
+ --tokenizer-name-or-path t5-small \
98
+ --loss-scale 12 \
99
+ --clip-grad 1.0 \
100
+ --fp16 \
101
+ --checkpoint-activations \
102
+ $OPTIMIZER_ARGS \
103
+ $EXIT_OPTS \
104
+ "
105
+
106
+ OUTPUT_ARGS=" \
107
+ --log-interval 200 \
108
+ --save-interval $SAVE_INTERVAL \
109
+ --eval-interval 1000 \
110
+ --eval-iters 100 \
111
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
112
+ --tensorboard-queue-size 5 \
113
+ --log-timers-to-tensorboard \
114
+ --log-batch-size-to-tensorboard \
115
+ --log-validation-ppl-to-tensorboard \
116
+ "
117
+
118
+ ZERO_STAGE=1
119
+
120
+ config_json="./ds_config.$SLURM_JOBID.json"
121
+
122
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
123
+ cat <<EOT > $config_json
124
+ {
125
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
126
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
127
+ "gradient_clipping": 1.0,
128
+ "zero_optimization": {
129
+ "stage": $ZERO_STAGE
130
+ },
131
+ "fp16": {
132
+ "enabled": true,
133
+ "loss_scale": 0,
134
+ "loss_scale_window": 500,
135
+ "hysteresis": 2,
136
+ "min_loss_scale": 1,
137
+ "initial_scale_power": 12
138
+ },
139
+ "steps_per_print": 2000,
140
+ "wall_clock_breakdown": false
141
+ }
142
+ EOT
143
+
144
+
145
+ DEEPSPEED_ARGS=" \
146
+ --deepspeed \
147
+ --deepspeed_config ${config_json} \
148
+ --zero-stage ${ZERO_STAGE} \
149
+ --deepspeed-activation-checkpointing \
150
+ "
151
+
152
+ export LAUNCHER="python -u -m torch.distributed.launch \
153
+ --nproc_per_node $GPUS_PER_NODE \
154
+ --nnodes $NNODES \
155
+ --master_addr $MASTER_ADDR \
156
+ --master_port $MASTER_PORT \
157
+ "
158
+
159
+ export CMD=" \
160
+ `pwd`/pretrain_gpt.py \
161
+ --tensor-model-parallel-size $TP_SIZE \
162
+ --pipeline-model-parallel-size $PP_SIZE \
163
+ $GPT_ARGS \
164
+ $OUTPUT_ARGS \
165
+ --save $OUTPUT_PATH/checkpoints \
166
+ --load $OUTPUT_PATH/checkpoints \
167
+ --data-path $DATA_PATH \
168
+ --data-impl mmap \
169
+ --split 949,50,1 \
170
+ --distributed-backend nccl \
171
+ $DEEPSPEED_ARGS \
172
+ "
173
+
174
+
175
+ # # clear old checkpoint as it'd mismatch while we sort things out
176
+ # rm -rf $SAVE_CHECKPOINT_PATH
177
+
178
+
179
+ echo $CMD
180
+
181
+ # to debug - add echo (it exits and prints what it would have launched)
182
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3-1B3-modeling-baseline.$SLURM_JOBID.out
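
A quick sanity check of the parallelism and batch settings in this script; the gradient-accumulation value is the one DeepSpeed derives internally via `set_train_batch_size()`, as the comment above the ds_config notes:

```bash
NNODES=16; GPUS_PER_NODE=4; PP_SIZE=4; TP_SIZE=4
MICRO_BATCH_SIZE=16; GLOBAL_BATCH_SIZE=1024
DP_SIZE=$(( NNODES * GPUS_PER_NODE / (PP_SIZE * TP_SIZE) ))   # 64 GPUs / 16-way model parallel = 4
GAS=$(( GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * DP_SIZE) ))   # 1024 / (16 * 4) = 16
echo "DP=$DP_SIZE  gradient_accumulation_steps=$GAS"
```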
train/tr3-1B3-baseline/tr3b-760M.slurm ADDED
@@ -0,0 +1,180 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=760M.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ ROUND=2
18
+ TESTING=0
19
+
20
+ OUTPUT_PATH=$SCRATCH/synched_exps/tr3b-760M/
21
+ MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
22
+
23
+ if [[ ${TESTING} == 1 ]]; then
24
+ # testing on 10k
25
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document
26
+ else
27
+ # production on full 304M records
28
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document
29
+
30
+ fi
31
+
32
+ source $six_ALL_CCFRWORK/start-prod
33
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
34
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
35
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
36
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
37
+ export HF_DATASETS_OFFLINE=1
38
+ export TRANSFORMERS_OFFLINE=1
39
+ cd $MEGATRON_DEEPSPEED_REPO
40
+
41
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
42
+ MASTER_PORT=6000
43
+
44
+ # adjust depending on the number of the nodes
45
+
46
+ # XXX: edit me
47
+ GPUS_PER_NODE=4
48
+ NNODES=16
49
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
50
+ TP_SIZE=4 # always fixed to the size of a single node
51
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
52
+
53
+ MICRO_BATCH_SIZE=4
54
+ GLOBAL_BATCH_SIZE=256
55
+ TRAIN_ITER=146_484_375
56
+
57
+ NLAYERS=24
58
+ NHIDDEN=1536
59
+ NHEADS=16
60
+ FFN_HIDDEN_SIZE=6144
61
+ SEQ_LEN=2048
62
+
63
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
64
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
65
+ else echo "invalid ROUND: $ROUND"
66
+ fi
67
+
68
+ OPTIMIZER_ARGS=" \
69
+ --optimizer adam \
70
+ --adam-beta1 0.9 \
71
+ --adam-beta2 0.999 \
72
+ --adam-eps 1e-8 \
73
+ --lr 2.5e-4 \
74
+ --min-lr 1e-5 \
75
+ --lr-decay-style cosine \
76
+ --lr-decay-samples 126_953_125 \
77
+ --lr-warmup-samples 183_105 \
78
+ --clip-grad 1.0 \
79
+ --weight-decay 1e-1 \
80
+ "
81
+
82
+ EXIT_OPTS=" \
83
+ --exit-duration-in-mins 1190 \
84
+ "
85
+
86
+ GPT_ARGS=" \
87
+ --num-layers $NLAYERS \
88
+ --hidden-size $NHIDDEN \
89
+ --num-attention-heads $NHEADS \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_ITER \
96
+ --tokenizer-type PretrainedFromHF \
97
+ --tokenizer-name-or-path t5-small \
98
+ --loss-scale 12 \
99
+ --clip-grad 1.0 \
100
+ --fp16 \
101
+ $OPTIMIZER_ARGS \
102
+ $EXIT_OPTS \
103
+ "
104
+
105
+ OUTPUT_ARGS=" \
106
+ --log-interval 200 \
107
+ --save-interval $SAVE_INTERVAL \
108
+ --eval-interval 1000 \
109
+ --eval-iters 100 \
110
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
111
+ --tensorboard-queue-size 5 \
112
+ --log-timers-to-tensorboard \
113
+ --log-batch-size-to-tensorboard \
114
+ --log-validation-ppl-to-tensorboard \
115
+ "
116
+
117
+ ZERO_STAGE=1
118
+
119
+ config_json="./ds_config.$SLURM_JOBID.json"
120
+
121
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
122
+ cat <<EOT > $config_json
123
+ {
124
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
125
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
126
+ "gradient_clipping": 1.0,
127
+ "zero_optimization": {
128
+ "stage": $ZERO_STAGE
129
+ },
130
+ "fp16": {
131
+ "enabled": true,
132
+ "loss_scale": 0,
133
+ "loss_scale_window": 500,
134
+ "hysteresis": 2,
135
+ "min_loss_scale": 1,
136
+ "initial_scale_power": 12
137
+ },
138
+ "steps_per_print": 2000,
139
+ "wall_clock_breakdown": false
140
+ }
141
+ EOT
142
+
143
+
144
+ DEEPSPEED_ARGS=" \
145
+ --deepspeed \
146
+ --deepspeed_config ${config_json} \
147
+ --zero-stage ${ZERO_STAGE} \
148
+ "
149
+
150
+ export LAUNCHER="python -u -m torch.distributed.launch \
151
+ --nproc_per_node $GPUS_PER_NODE \
152
+ --nnodes $NNODES \
153
+ --master_addr $MASTER_ADDR \
154
+ --master_port $MASTER_PORT \
155
+ "
156
+
157
+ export CMD=" \
158
+ `pwd`/pretrain_gpt.py \
159
+ --tensor-model-parallel-size $TP_SIZE \
160
+ --pipeline-model-parallel-size $PP_SIZE \
161
+ $GPT_ARGS \
162
+ $OUTPUT_ARGS \
163
+ --save $OUTPUT_PATH/checkpoints \
164
+ --load $OUTPUT_PATH/checkpoints \
165
+ --data-path $DATA_PATH \
166
+ --data-impl mmap \
167
+ --split 949,50,1 \
168
+ --distributed-backend nccl \
169
+ $DEEPSPEED_ARGS \
170
+ "
171
+
172
+
173
+ # # clear old checkpoint as it'd mismatch while we sort things out
174
+ # rm -rf $SAVE_CHECKPOINT_PATH
175
+
176
+
177
+ echo $CMD
178
+
179
+ # to debug - add echo (it exits and prints what it would have launched)
180
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3b-760M-modeling-baseline.$SLURM_JOBID.out
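
The sample counts shared by these scripts translate into token budgets as follows (SEQ_LEN is 2048 everywhere); the underscores are dropped because bash arithmetic does not accept them:

```bash
SEQ_LEN=2048
echo $(( 146484375 * SEQ_LEN ))   # --train-samples     -> 300,000,000,000 tokens (300B)
echo $(( 126953125 * SEQ_LEN ))   # --lr-decay-samples  -> 260,000,000,000 tokens (260B)
echo $((    183105 * SEQ_LEN ))   # --lr-warmup-samples -> ~375M tokens
```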
train/tr3-1B3-baseline/tr3c-350M.slurm ADDED
@@ -0,0 +1,180 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=350M.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=4
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ ROUND=2
18
+ TESTING=0
19
+
20
+ OUTPUT_PATH=$SCRATCH/synched_exps/tr3c-350M/
21
+ MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
22
+
23
+ if [[ ${TESTING} == 1 ]]; then
24
+ # testing on 10k
25
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document
26
+ else
27
+ # production on full 304M records
28
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document
29
+
30
+ fi
31
+
32
+ source $six_ALL_CCFRWORK/start-prod
33
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
34
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
35
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
36
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
37
+ export HF_DATASETS_OFFLINE=1
38
+ export TRANSFORMERS_OFFLINE=1
39
+ cd $MEGATRON_DEEPSPEED_REPO
40
+
41
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
42
+ MASTER_PORT=6000
43
+
44
+ # adjust depending on the number of the nodes
45
+
46
+ # XXX: edit me
47
+ GPUS_PER_NODE=4
48
+ NNODES=4
49
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
50
+ TP_SIZE=4 # always fixed to the size of a single node
51
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
52
+
53
+ MICRO_BATCH_SIZE=4
54
+ GLOBAL_BATCH_SIZE=256
55
+ TRAIN_ITER=146_484_375
56
+
57
+ NLAYERS=24
58
+ NHIDDEN=1024
59
+ NHEADS=16
60
+ FFN_HIDDEN_SIZE=4096
61
+ SEQ_LEN=2048
62
+
63
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
64
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
65
+ else echo "invalid ROUND: $ROUND"
66
+ fi
67
+
68
+ OPTIMIZER_ARGS=" \
69
+ --optimizer adam \
70
+ --adam-beta1 0.9 \
71
+ --adam-beta2 0.999 \
72
+ --adam-eps 1e-8 \
73
+ --lr 3e-4 \
74
+ --min-lr 1e-5 \
75
+ --lr-decay-style cosine \
76
+ --lr-decay-samples 126_953_125 \
77
+ --lr-warmup-samples 183_105 \
78
+ --clip-grad 1.0 \
79
+ --weight-decay 1e-1 \
80
+ "
81
+
82
+ EXIT_OPTS=" \
83
+ --exit-duration-in-mins 1190 \
84
+ "
85
+
86
+ GPT_ARGS=" \
87
+ --num-layers $NLAYERS \
88
+ --hidden-size $NHIDDEN \
89
+ --num-attention-heads $NHEADS \
90
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
91
+ --seq-length $SEQ_LEN \
92
+ --max-position-embeddings $SEQ_LEN \
93
+ --micro-batch-size $MICRO_BATCH_SIZE \
94
+ --global-batch-size $GLOBAL_BATCH_SIZE \
95
+ --train-samples $TRAIN_ITER \
96
+ --tokenizer-type PretrainedFromHF \
97
+ --tokenizer-name-or-path t5-small \
98
+ --loss-scale 12 \
99
+ --clip-grad 1.0 \
100
+ --fp16 \
101
+ $OPTIMIZER_ARGS \
102
+ $EXIT_OPTS \
103
+ "
104
+
105
+ OUTPUT_ARGS=" \
106
+ --log-interval 200 \
107
+ --save-interval $SAVE_INTERVAL \
108
+ --eval-interval 1000 \
109
+ --eval-iters 100 \
110
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
111
+ --tensorboard-queue-size 5 \
112
+ --log-timers-to-tensorboard \
113
+ --log-batch-size-to-tensorboard \
114
+ --log-validation-ppl-to-tensorboard \
115
+ "
116
+
117
+ ZERO_STAGE=1
118
+
119
+ config_json="./ds_config.$SLURM_JOBID.json"
120
+
121
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
122
+ cat <<EOT > $config_json
123
+ {
124
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
125
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
126
+ "gradient_clipping": 1.0,
127
+ "zero_optimization": {
128
+ "stage": $ZERO_STAGE
129
+ },
130
+ "fp16": {
131
+ "enabled": true,
132
+ "loss_scale": 0,
133
+ "loss_scale_window": 500,
134
+ "hysteresis": 2,
135
+ "min_loss_scale": 1,
136
+ "initial_scale_power": 12
137
+ },
138
+ "steps_per_print": 2000,
139
+ "wall_clock_breakdown": false
140
+ }
141
+ EOT
142
+
143
+
144
+ DEEPSPEED_ARGS=" \
145
+ --deepspeed \
146
+ --deepspeed_config ${config_json} \
147
+ --zero-stage ${ZERO_STAGE} \
148
+ "
149
+
150
+ export LAUNCHER="python -u -m torch.distributed.launch \
151
+ --nproc_per_node $GPUS_PER_NODE \
152
+ --nnodes $NNODES \
153
+ --master_addr $MASTER_ADDR \
154
+ --master_port $MASTER_PORT \
155
+ "
156
+
157
+ export CMD=" \
158
+ `pwd`/pretrain_gpt.py \
159
+ --tensor-model-parallel-size $TP_SIZE \
160
+ --pipeline-model-parallel-size $PP_SIZE \
161
+ $GPT_ARGS \
162
+ $OUTPUT_ARGS \
163
+ --save $OUTPUT_PATH/checkpoints \
164
+ --load $OUTPUT_PATH/checkpoints \
165
+ --data-path $DATA_PATH \
166
+ --data-impl mmap \
167
+ --split 949,50,1 \
168
+ --distributed-backend nccl \
169
+ $DEEPSPEED_ARGS \
170
+ "
171
+
172
+
173
+ # # clear old checkpoint as it'd mismatch while we sort things out
174
+ # rm -rf $SAVE_CHECKPOINT_PATH
175
+
176
+
177
+ echo $CMD
178
+
179
+ # to debug - add echo (it exits and prints what it would have launched)
180
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3c-350M-modeling-baseline.$SLURM_JOBID.out
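
A rough parameter-count check for the three sizes above, using the usual 12·L·H² transformer approximation (all of these configs set FFN_HIDDEN_SIZE = 4·NHIDDEN) plus V·H for the token embedding; biases, layernorms and position embeddings are ignored, and the ~50k vocabulary is the GPT-2 one, so the C4 runs (t5-small tokenizer) come out somewhat smaller in practice:

```bash
approx_params () {  # args: layers hidden vocab_size
    local L=$1 H=$2 V=$3
    echo $(( 12 * L * H * H + V * H ))
}
approx_params 24 2048 50257   # ~1.31e9 -> "1B3"
approx_params 24 1536 50257   # ~0.76e9 -> "760M"
approx_params 24 1024 50257   # ~0.35e9 -> "350M"
```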
train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm ADDED
@@ -0,0 +1,178 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-v2.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ ROUND=2
18
+ TESTING=0
19
+
20
+ OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3d-1B3-more-warmup/
21
+ MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
22
+
23
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
24
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
25
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
26
+
27
+ source $six_ALL_CCFRWORK/start-prod
28
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
29
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
30
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
31
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
32
+ export HF_DATASETS_OFFLINE=1
33
+ export TRANSFORMERS_OFFLINE=1
34
+ cd $MEGATRON_DEEPSPEED_REPO
35
+
36
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
37
+ MASTER_PORT=6000
38
+
39
+ # adjust depending on the number of the nodes
40
+
41
+ # XXX: edit me
42
+ GPUS_PER_NODE=4
43
+ NNODES=16
44
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
45
+ TP_SIZE=4 # always fixed to the size of a single node
46
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
47
+
48
+ MICRO_BATCH_SIZE=8
49
+ GLOBAL_BATCH_SIZE=512
50
+ TRAIN_ITER=73_242_187
51
+
52
+ NLAYERS=24
53
+ NHIDDEN=2048
54
+ NHEADS=16
55
+ FFN_HIDDEN_SIZE=8192
56
+ SEQ_LEN=2048
57
+
58
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
59
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
60
+ else echo "invalid ROUND: $ROUND"
61
+ fi
62
+
63
+ OPTIMIZER_ARGS=" \
64
+ --optimizer adam \
65
+ --adam-beta1 0.9 \
66
+ --adam-beta2 0.999 \
67
+ --adam-eps 1e-8 \
68
+ --lr 2e-4 \
69
+ --min-lr 1e-5 \
70
+ --lr-decay-style cosine \
71
+ --lr-decay-samples 73_242_187 \
72
+ --lr-warmup-samples 183_105 \
73
+ --clip-grad 1.0 \
74
+ --weight-decay 1e-1 \
75
+ "
76
+
77
+ EXIT_OPTS=" \
78
+ --exit-duration-in-mins 1190 \
79
+ "
80
+
81
+ GPT_ARGS=" \
82
+ --num-layers $NLAYERS \
83
+ --hidden-size $NHIDDEN \
84
+ --num-attention-heads $NHEADS \
85
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
86
+ --seq-length $SEQ_LEN \
87
+ --max-position-embeddings $SEQ_LEN \
88
+ --micro-batch-size $MICRO_BATCH_SIZE \
89
+ --global-batch-size $GLOBAL_BATCH_SIZE \
90
+ --rampup-batch-size 32 32 2_000_000 \
91
+ --train-samples $TRAIN_ITER \
92
+ --vocab-file $VOCAB_FILE \
93
+ --merge-file $MERGE_FILE \
94
+ --loss-scale 12 \
95
+ --clip-grad 1.0 \
96
+ --fp16 \
97
+ --checkpoint-activations \
98
+ $OPTIMIZER_ARGS \
99
+ $EXIT_OPTS \
100
+ "
101
+
102
+ OUTPUT_ARGS=" \
103
+ --log-interval 200 \
104
+ --save-interval $SAVE_INTERVAL \
105
+ --eval-interval 1000 \
106
+ --eval-iters 100 \
107
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
108
+ --tensorboard-queue-size 5 \
109
+ --log-timers-to-tensorboard \
110
+ --log-batch-size-to-tensorboard \
111
+ --log-validation-ppl-to-tensorboard \
112
+ "
113
+
114
+ ZERO_STAGE=1
115
+
116
+ config_json="./ds_config.$SLURM_JOBID.json"
117
+
118
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
119
+ cat <<EOT > $config_json
120
+ {
121
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
122
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
123
+ "gradient_clipping": 1.0,
124
+ "zero_optimization": {
125
+ "stage": $ZERO_STAGE
126
+ },
127
+ "fp16": {
128
+ "enabled": true,
129
+ "loss_scale": 0,
130
+ "loss_scale_window": 500,
131
+ "hysteresis": 2,
132
+ "min_loss_scale": 1,
133
+ "initial_scale_power": 12
134
+ },
135
+ "steps_per_print": 2000,
136
+ "wall_clock_breakdown": false
137
+ }
138
+ EOT
139
+
140
+
141
+ DEEPSPEED_ARGS=" \
142
+ --deepspeed \
143
+ --deepspeed_config ${config_json} \
144
+ --zero-stage ${ZERO_STAGE} \
145
+ --deepspeed-activation-checkpointing \
146
+ "
147
+
148
+ export LAUNCHER="python -u -m torch.distributed.launch \
149
+ --nproc_per_node $GPUS_PER_NODE \
150
+ --nnodes $NNODES \
151
+ --master_addr $MASTER_ADDR \
152
+ --master_port $MASTER_PORT \
153
+ "
154
+
155
+ export CMD=" \
156
+ `pwd`/pretrain_gpt.py \
157
+ --tensor-model-parallel-size $TP_SIZE \
158
+ --pipeline-model-parallel-size $PP_SIZE \
159
+ $GPT_ARGS \
160
+ $OUTPUT_ARGS \
161
+ --save $OUTPUT_PATH/checkpoints \
162
+ --load $OUTPUT_PATH/checkpoints \
163
+ --data-path $DATA_PATH \
164
+ --data-impl mmap \
165
+ --split 949,50,1 \
166
+ --distributed-backend nccl \
167
+ $DEEPSPEED_ARGS \
168
+ "
169
+
170
+
171
+ # # clear old checkpoint as it'd mismatch while we sort things out
172
+ # rm -rf $SAVE_CHECKPOINT_PATH
173
+
174
+
175
+ echo $CMD
176
+
177
+ # to debug - add echo (it exits and prints what it would have launched)
178
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3d-1B3-more-warmup.$SLURM_JOBID.out
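
This script introduces `--rampup-batch-size 32 32 2_000_000`, which (reading the triple as start / increment / ramp-up samples, the Megatron convention) grows the global batch from 32 to the final 512 in steps of 32 over the first 2M samples. A back-of-the-envelope check:

```bash
START=32; INCREMENT=32; FINAL=512; RAMPUP_SAMPLES=2000000
STEPS=$(( (FINAL - START) / INCREMENT ))                      # 15 batch-size increments
echo "samples per increment: $(( RAMPUP_SAMPLES / STEPS ))"   # ~133,333 samples at each batch size
```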
train/tr3-1B3-baseline/tr3d-1B3-oscar-training2.slurm ADDED
@@ -0,0 +1,184 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr3d-1B3-oscar.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3d-1B3-oscar
18
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
19
+ REPO_PATH=$DATA_OUTPUT_PATH/tr3d-1B3-oscar-logs
20
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
21
+ CODECARBON_PATH=$REPO_PATH/codecarbon
22
+ LOGS_PATH=$REPO_PATH/logs
23
+ # You need to git clone the Megatron-DeepSpeed
24
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
25
+
26
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
27
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
28
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
29
+
30
+ # defining the right environment variables
31
+ source $six_ALL_CCFRWORK/start-prod
32
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
33
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
34
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
35
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
36
+ export HF_DATASETS_OFFLINE=1
37
+ export TRANSFORMERS_OFFLINE=1
38
+ cd $MEGATRON_DEEPSPEED_REPO
39
+
40
+ # testing for potential faulty nodes
41
+ srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
42
+
43
+ # so processes know who to talk to
44
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
45
+ MASTER_PORT=6000
46
+
47
+ GPUS_PER_NODE=4
48
+ NNODES=16
49
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
50
+ TP_SIZE=1 # tensor parallelism disabled for this run (not tied to the 4-GPU node size)
51
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
52
+
53
+ MICRO_BATCH_SIZE=1
54
+ GLOBAL_BATCH_SIZE=512
55
+ TRAIN_ITER=146_484_375
56
+
57
+ NLAYERS=24
58
+ NHIDDEN=2048
59
+ NHEADS=16
60
+ FFN_HIDDEN_SIZE=8192
61
+ SEQ_LEN=2048
62
+
63
+ SAVE_INTERVAL=1500
64
+
65
+ OPTIMIZER_ARGS=" \
66
+ --optimizer adam \
67
+ --adam-beta1 0.9 \
68
+ --adam-beta2 0.999 \
69
+ --adam-eps 1e-8 \
70
+ --lr 2e-4 \
71
+ --min-lr 1e-5 \
72
+ --lr-decay-style cosine \
73
+ --lr-decay-samples 73_242_187 \
74
+ --lr-warmup-samples 183_105 \
75
+ --clip-grad 1.0 \
76
+ --weight-decay 1e-1 \
77
+ "
78
+
79
+ EXIT_OPTS=" \
80
+ --exit-duration-in-mins 1190 \
81
+ "
82
+
83
+ GPT_ARGS=" \
84
+ --num-layers $NLAYERS \
85
+ --hidden-size $NHIDDEN \
86
+ --num-attention-heads $NHEADS \
87
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
88
+ --seq-length $SEQ_LEN \
89
+ --max-position-embeddings $SEQ_LEN \
90
+ --micro-batch-size $MICRO_BATCH_SIZE \
91
+ --global-batch-size $GLOBAL_BATCH_SIZE \
92
+ --rampup-batch-size 32 32 2_000_000 \
93
+ --train-samples $TRAIN_ITER \
94
+ --vocab-file $VOCAB_FILE \
95
+ --merge-file $MERGE_FILE \
96
+ --loss-scale 12 \
97
+ --clip-grad 1.0 \
98
+ --fp16 \
99
+ --checkpoint-activations \
100
+ $OPTIMIZER_ARGS \
101
+ $EXIT_OPTS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 200 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 1000 \
108
+ --eval-iters 100 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+ # TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current
116
+ # series of experiments consistent, especially speed-wise. Adding it once Tr6 and Tr7 are done
117
+
118
+ ZERO_STAGE=1
119
+
120
+ config_json="./ds_config.$SLURM_JOBID.json"
121
+
122
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
123
+ cat <<EOT > $config_json
124
+ {
125
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
126
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
127
+ "gradient_clipping": 1.0,
128
+ "zero_optimization": {
129
+ "stage": $ZERO_STAGE
130
+ },
131
+ "fp16": {
132
+ "enabled": true,
133
+ "loss_scale": 0,
134
+ "loss_scale_window": 500,
135
+ "hysteresis": 2,
136
+ "min_loss_scale": 1,
137
+ "initial_scale_power": 12
138
+ },
139
+ "steps_per_print": 2000,
140
+ "wall_clock_breakdown": false
141
+ }
142
+ EOT
143
+
144
+
145
+ DEEPSPEED_ARGS=" \
146
+ --deepspeed \
147
+ --deepspeed_config ${config_json} \
148
+ --zero-stage ${ZERO_STAGE} \
149
+ --deepspeed-activation-checkpointing \
150
+ "
151
+
152
+ export LAUNCHER="python -u -m torch.distributed.launch \
153
+ --nproc_per_node $GPUS_PER_NODE \
154
+ --nnodes $NNODES \
155
+ --master_addr $MASTER_ADDR \
156
+ --master_port $MASTER_PORT \
157
+ "
158
+
159
+ export CMD=" \
160
+ `pwd`/pretrain_gpt.py \
161
+ --tensor-model-parallel-size $TP_SIZE \
162
+ --pipeline-model-parallel-size $PP_SIZE \
163
+ $GPT_ARGS \
164
+ $OUTPUT_ARGS \
165
+ --save $CHECKPOINT_PATH \
166
+ --load $CHECKPOINT_PATH \
167
+ --data-path $DATA_PATH \
168
+ --data-impl mmap \
169
+ --split 949,50,1 \
170
+ --distributed-backend nccl \
171
+ $DEEPSPEED_ARGS \
172
+ "
173
+
174
+
175
+ # # clear old checkpoint as it'd mismatch while we sort things out
176
+ # rm -rf $SAVE_CHECKPOINT_PATH
177
+
178
+
179
+ echo $CMD
180
+
181
+ # We create the folder where the logs and codecarbon will be stored.
182
+ mkdir -p $LOGS_PATH
183
+ # to debug - add echo (it exits and prints what it would have launched)
184
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/tr3-1B3-baseline/tr3e-1B3-c4-training2.slurm ADDED
@@ -0,0 +1,184 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=tr3e-1B3-c4.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+ DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3e-1B3-c4
17
+ CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints
18
+ REPO_PATH=$DATA_OUTPUT_PATH/tr3e-1B3-c4-logs
19
+ TENSORBOARD_PATH=$REPO_PATH/tensorboard
20
+ CODECARBON_PATH=$REPO_PATH/codecarbon
21
+ LOGS_PATH=$REPO_PATH/logs
22
+ # You need to git clone the Megatron-DeepSpeed
23
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
24
+
25
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
26
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
27
+ DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/c4_preprocessing/c4_en_train_text_document
28
+
29
+ # defining the right environment variables
30
+ source $six_ALL_CCFRWORK/start-prod
31
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
32
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
33
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
34
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
35
+ export HF_DATASETS_OFFLINE=1
36
+ export TRANSFORMERS_OFFLINE=1
37
+ cd $MEGATRON_DEEPSPEED_REPO
38
+
39
+ # testing for potential faulty nodes
40
+ srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"'
41
+
42
+ # so processes know who to talk to
43
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
44
+ MASTER_PORT=6000
45
+
46
+ # TODO: this is our base config for 1B3, edit PP/TP/batch size/model config if smaller or bigger
47
+ GPUS_PER_NODE=4
48
+ NNODES=16
49
+ PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here
50
+ TP_SIZE=1 # always fixed to the size of a single node
51
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
52
+
53
+ MICRO_BATCH_SIZE=1
54
+ GLOBAL_BATCH_SIZE=512
55
+ TRAIN_ITER=146_484_375
56
+
57
+ NLAYERS=24
58
+ NHIDDEN=2048
59
+ NHEADS=16
60
+ FFN_HIDDEN_SIZE=8192
61
+ SEQ_LEN=2048
62
+
63
+ SAVE_INTERVAL=1500
64
+
65
+ OPTIMIZER_ARGS=" \
66
+ --optimizer adam \
67
+ --adam-beta1 0.9 \
68
+ --adam-beta2 0.999 \
69
+ --adam-eps 1e-8 \
70
+ --lr 2e-4 \
71
+ --min-lr 1e-5 \
72
+ --lr-decay-style cosine \
73
+ --lr-decay-samples 73_242_187 \
74
+ --lr-warmup-samples 183_105 \
75
+ --clip-grad 1.0 \
76
+ --weight-decay 1e-1 \
77
+ "
78
+
79
+ EXIT_OPTS=" \
80
+ --exit-duration-in-mins 1190 \
81
+ "
82
+
83
+ GPT_ARGS=" \
84
+ --num-layers $NLAYERS \
85
+ --hidden-size $NHIDDEN \
86
+ --num-attention-heads $NHEADS \
87
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
88
+ --seq-length $SEQ_LEN \
89
+ --max-position-embeddings $SEQ_LEN \
90
+ --micro-batch-size $MICRO_BATCH_SIZE \
91
+ --global-batch-size $GLOBAL_BATCH_SIZE \
92
+ --rampup-batch-size 32 32 2_000_000 \
93
+ --train-samples $TRAIN_ITER \
94
+ --vocab-file $VOCAB_FILE \
95
+ --merge-file $MERGE_FILE \
96
+ --loss-scale 12 \
97
+ --clip-grad 1.0 \
98
+ --fp16 \
99
+ --checkpoint-activations \
100
+ $OPTIMIZER_ARGS \
101
+ $EXIT_OPTS \
102
+ "
103
+
104
+ OUTPUT_ARGS=" \
105
+ --log-interval 200 \
106
+ --save-interval $SAVE_INTERVAL \
107
+ --eval-interval 1000 \
108
+ --eval-iters 100 \
109
+ --tensorboard-dir $TENSORBOARD_PATH \
110
+ --tensorboard-queue-size 5 \
111
+ --log-timers-to-tensorboard \
112
+ --log-batch-size-to-tensorboard \
113
+ --log-validation-ppl-to-tensorboard \
114
+ "
115
+ # TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current
116
+ # series of experiments consistent, especially speed-wise. Adding it once Tr6 and Tr7 are done
117
+
118
+ ZERO_STAGE=1
119
+
120
+ config_json="./ds_config.$SLURM_JOBID.json"
121
+
122
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
123
+ cat <<EOT > $config_json
124
+ {
125
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
126
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
127
+ "gradient_clipping": 1.0,
128
+ "zero_optimization": {
129
+ "stage": $ZERO_STAGE
130
+ },
131
+ "fp16": {
132
+ "enabled": true,
133
+ "loss_scale": 0,
134
+ "loss_scale_window": 500,
135
+ "hysteresis": 2,
136
+ "min_loss_scale": 1,
137
+ "initial_scale_power": 12
138
+ },
139
+ "steps_per_print": 2000,
140
+ "wall_clock_breakdown": false
141
+ }
142
+ EOT
143
+
144
+
145
+ DEEPSPEED_ARGS=" \
146
+ --deepspeed \
147
+ --deepspeed_config ${config_json} \
148
+ --zero-stage ${ZERO_STAGE} \
149
+ --deepspeed-activation-checkpointing \
150
+ "
151
+
152
+ export LAUNCHER="python -u -m torch.distributed.launch \
153
+ --nproc_per_node $GPUS_PER_NODE \
154
+ --nnodes $NNODES \
155
+ --master_addr $MASTER_ADDR \
156
+ --master_port $MASTER_PORT \
157
+ "
158
+
159
+ export CMD=" \
160
+ `pwd`/pretrain_gpt.py \
161
+ --tensor-model-parallel-size $TP_SIZE \
162
+ --pipeline-model-parallel-size $PP_SIZE \
163
+ $GPT_ARGS \
164
+ $OUTPUT_ARGS \
165
+ --save $CHECKPOINT_PATH \
166
+ --load $CHECKPOINT_PATH \
167
+ --data-path $DATA_PATH \
168
+ --data-impl mmap \
169
+ --split 949,50,1 \
170
+ --distributed-backend nccl \
171
+ $DEEPSPEED_ARGS \
172
+ "
173
+
174
+
175
+ # # clear old checkpoint as it'd mismatch while we sort things out
176
+ # rm -rf $SAVE_CHECKPOINT_PATH
177
+
178
+
179
+ echo $CMD
180
+
181
+ # We create the folder where the logs and codecarbon will be stored.
182
+ mkdir -p $LOGS_PATH
183
+ # to debug - add echo (it exits and prints what it would have launched)
184
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt
train/tr3-1B3-baseline/tr3e-1B3-diagnostic1-warmup-c4.slurm ADDED
@@ -0,0 +1,176 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=1B3-diagnostic1.slurm
3
+ #SBATCH --qos=qos_gpu-t3
4
+ #SBATCH --nodes=16
5
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
6
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
7
+ #SBATCH --hint=nomultithread # we get physical cores not logical
8
+ #SBATCH --gres=gpu:4 # number of gpus
9
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
10
+ #SBATCH --output=%x-%j.out # output file name
11
+ #SBATCH --error=%x-%j.out # error file name (same to watch just one file)
12
+ #SBATCH --account=six@v100
13
+
14
+ set -x -e
15
+
16
+
17
+ ROUND=2
18
+ TESTING=0
19
+
20
+ OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3e-1B3-diagnostic1-warmup-c4
21
+ MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
22
+
23
+ DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document
24
+
25
+ source $six_ALL_CCFRWORK/start-prod
26
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
27
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
28
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
29
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
30
+ export HF_DATASETS_OFFLINE=1
31
+ export TRANSFORMERS_OFFLINE=1
32
+ cd $MEGATRON_DEEPSPEED_REPO
33
+
34
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
35
+ MASTER_PORT=6000
36
+
37
+ # adjust depending on the number of the nodes
38
+
39
+ # XXX: edit me
40
+ GPUS_PER_NODE=4
41
+ NNODES=16
42
+ PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here
43
+ TP_SIZE=4 # always fixed to the size of a single node
44
+ DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
45
+
46
+ MICRO_BATCH_SIZE=8
47
+ GLOBAL_BATCH_SIZE=512
48
+ TRAIN_ITER=73_242_187
49
+
50
+ NLAYERS=24
51
+ NHIDDEN=2048
52
+ NHEADS=16
53
+ FFN_HIDDEN_SIZE=8192
54
+ SEQ_LEN=2048
55
+
56
+ if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
57
+ elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
58
+ else echo "invalid ROUND: $ROUND"
59
+ fi
60
+
61
+ OPTIMIZER_ARGS=" \
62
+ --optimizer adam \
63
+ --adam-beta1 0.9 \
64
+ --adam-beta2 0.999 \
65
+ --adam-eps 1e-8 \
66
+ --lr 2e-4 \
67
+ --min-lr 1e-5 \
68
+ --lr-decay-style cosine \
69
+ --lr-decay-samples 73_242_187 \
70
+ --lr-warmup-samples 183_105 \
71
+ --clip-grad 1.0 \
72
+ --weight-decay 1e-1 \
73
+ "
74
+
75
+ EXIT_OPTS=" \
76
+ --exit-duration-in-mins 1190 \
77
+ "
78
+
79
+ GPT_ARGS=" \
80
+ --num-layers $NLAYERS \
81
+ --hidden-size $NHIDDEN \
82
+ --num-attention-heads $NHEADS \
83
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
84
+ --seq-length $SEQ_LEN \
85
+ --max-position-embeddings $SEQ_LEN \
86
+ --micro-batch-size $MICRO_BATCH_SIZE \
87
+ --global-batch-size $GLOBAL_BATCH_SIZE \
88
+ --rampup-batch-size 32 32 2_000_000 \
89
+ --train-samples $TRAIN_ITER \
90
+ --tokenizer-type PretrainedFromHF \
91
+ --tokenizer-name-or-path t5-small \
92
+ --loss-scale 12 \
93
+ --clip-grad 1.0 \
94
+ --fp16 \
95
+ --checkpoint-activations \
96
+ $OPTIMIZER_ARGS \
97
+ $EXIT_OPTS \
98
+ "
99
+
100
+ OUTPUT_ARGS=" \
101
+ --log-interval 200 \
102
+ --save-interval $SAVE_INTERVAL \
103
+ --eval-interval 1000 \
104
+ --eval-iters 100 \
105
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
106
+ --tensorboard-queue-size 5 \
107
+ --log-timers-to-tensorboard \
108
+ --log-batch-size-to-tensorboard \
109
+ --log-validation-ppl-to-tensorboard \
110
+ "
111
+
112
+ ZERO_STAGE=1
113
+
114
+ config_json="./ds_config.$SLURM_JOBID.json"
115
+
116
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
117
+ cat <<EOT > $config_json
118
+ {
119
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
120
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
121
+ "gradient_clipping": 1.0,
122
+ "zero_optimization": {
123
+ "stage": $ZERO_STAGE
124
+ },
125
+ "fp16": {
126
+ "enabled": true,
127
+ "loss_scale": 0,
128
+ "loss_scale_window": 500,
129
+ "hysteresis": 2,
130
+ "min_loss_scale": 1,
131
+ "initial_scale_power": 12
132
+ },
133
+ "steps_per_print": 2000,
134
+ "wall_clock_breakdown": false
135
+ }
136
+ EOT
137
+
138
+
139
+ DEEPSPEED_ARGS=" \
140
+ --deepspeed \
141
+ --deepspeed_config ${config_json} \
142
+ --zero-stage ${ZERO_STAGE} \
143
+ --deepspeed-activation-checkpointing \
144
+ "
145
+
146
+ export LAUNCHER="python -u -m torch.distributed.launch \
147
+ --nproc_per_node $GPUS_PER_NODE \
148
+ --nnodes $NNODES \
149
+ --master_addr $MASTER_ADDR \
150
+ --master_port $MASTER_PORT \
151
+ "
152
+
153
+ export CMD=" \
154
+ `pwd`/pretrain_gpt.py \
155
+ --tensor-model-parallel-size $TP_SIZE \
156
+ --pipeline-model-parallel-size $PP_SIZE \
157
+ $GPT_ARGS \
158
+ $OUTPUT_ARGS \
159
+ --save $OUTPUT_PATH/checkpoints \
160
+ --load $OUTPUT_PATH/checkpoints \
161
+ --data-path $DATA_PATH \
162
+ --data-impl mmap \
163
+ --split 949,50,1 \
164
+ --distributed-backend nccl \
165
+ $DEEPSPEED_ARGS \
166
+ "
167
+
168
+
169
+ # # clear old checkpoint as it'd mismatch while we sort things out
170
+ # rm -rf $SAVE_CHECKPOINT_PATH
171
+
172
+
173
+ echo $CMD
174
+
175
+ # to debug - add echo (it exits and prints what it would have launched)
176
+ srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3e-1B3-diagnostic1-warmup-c4.$SLURM_JOBID.out
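
Unlike the tr3d/tr3e "training2" scripts, which call `mkdir -p $LOGS_PATH` before launching, this one tees straight into `$OUTPUT_PATH/logs/`, so the output tree has to exist before submission. A minimal sketch of preparing it, using the paths from the script above:

```bash
OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3e-1B3-diagnostic1-warmup-c4
mkdir -p $OUTPUT_PATH/{checkpoints,tensorboard,logs}
```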