diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..68a2334c8fc92c03e01ed53eaed2121ca3efe96f --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,15 @@ +* Big Science version: +* Python version: +* Operating System: + +### Description + +Describe what you were trying to get done. +Tell us what happened, what went wrong, and what you expected to happen. + +### What I Did + +``` +Paste the command(s) you ran and the output. +If there was a crash, please include the traceback here. +``` diff --git a/experiments/bandwidth/all_reduce_bench-32gb-n4.slurm b/experiments/bandwidth/all_reduce_bench-32gb-n4.slurm new file mode 100644 index 0000000000000000000000000000000000000000..cd37c83832b34bfd6af298af73f63cf8f64333b8 --- /dev/null +++ b/experiments/bandwidth/all_reduce_bench-32gb-n4.slurm @@ -0,0 +1,20 @@ +#!/bin/bash +#SBATCH --job-name=all_reduce_bench-32gb-n4 +#SBATCH --constraint=v100-32g +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 srun task per node; torch.distributed.launch spawns the per-GPU processes! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@gpu + +export LOG_FILE=all_reduce_bench-32gb-n4.txt +export NNODES=4 +export GPUS_PER_NODE=4 +export NCCL_DEBUG=info + +export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) + +srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.launch --nnodes $NNODES --nproc_per_node $GPUS_PER_NODE --node_rank $SLURM_PROCID --master_addr $MASTER_ADDR --master_port 12345 all_reduce_bench.py' 2>&1 | tee $LOG_FILE diff --git a/experiments/bandwidth/all_reduce_bench-32gb-n4.txt b/experiments/bandwidth/all_reduce_bench-32gb-n4.txt new file mode 100644 index 0000000000000000000000000000000000000000..628990ce61f2b741a0000d95ff8436f2c332126a --- /dev/null +++ b/experiments/bandwidth/all_reduce_bench-32gb-n4.txt @@ -0,0 +1,850 @@ +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. 
See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +local_rank: 0 +local_rank: 0 +local_rank: 2 +local_rank: 1 +local_rank: 1 +local_rank: 3 +local_rank: 3 +local_rank: 2 +local_rank: 1 +local_rank: 3 +local_rank: 2 +local_rank: 0 +local_rank: 0 +local_rank: 1 +local_rank: 3 +local_rank: 2 +0 data size: 4.0 GB +4 data size: 4.0 GB +6 data size: 4.0 GB +11 data size: 4.0 GB +3 data size: 4.0 GB +10 data size: 4.0 GB +7 data size: 4.0 GB +5 data size: 4.0 GB +1 data size: 4.0 GB +2 data size: 4.0 GB +8 data size: 4.0 GB +15 data size: 4.0 GB +13 data size: 4.0 GB +12 data size: 4.0 GB +14 data size: 4.0 GB +9 data size: 4.0 GB +r6i6n4:257714:257714 [0] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0> +r6i6n4:257714:257714 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n4:257714:257714 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0> +r6i6n4:257714:257714 [0] NCCL INFO Using network IB +NCCL version 2.10.3+cuda11.3 +r6i6n4:257715:257715 [1] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0> +r6i6n5:378203:378203 [3] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0> +r6i6n5:378202:378202 [2] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0> +r6i6n5:378201:378201 [1] NCCL INFO Bootstrap : Using ib0:10.148.7.176<0> 
+r6i6n4:257715:257715 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n5:378202:378202 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n5:378203:378203 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n5:378201:378201 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n4:257715:257715 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0> +r6i6n4:257715:257715 [1] NCCL INFO Using network IB +r6i6n5:378203:378203 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0> +r6i6n5:378202:378202 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0> +r6i6n5:378203:378203 [3] NCCL INFO Using network IB +r6i6n5:378202:378202 [2] NCCL INFO Using network IB +r6i6n5:378201:378201 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0> +r6i6n5:378201:378201 [1] NCCL INFO Using network IB +r6i6n4:257717:257717 [3] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0> +r6i6n4:257717:257717 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n4:257717:257717 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0> +r6i6n4:257717:257717 [3] NCCL INFO Using network IB +r6i6n4:257716:257716 [2] NCCL INFO Bootstrap : Using ib0:10.148.7.175<0> +r6i6n4:257716:257716 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n4:257716:257716 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.175<0> +r6i6n4:257716:257716 [2] NCCL INFO Using network IB +r6i6n5:378200:378200 [0] NCCL 
INFO Bootstrap : Using ib0:10.148.7.176<0> +r6i6n5:378200:378200 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i6n5:378200:378200 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.7.176<0> +r6i6n5:378200:378200 [0] NCCL INFO Using network IB +r7i6n2:1370349:1370349 [2] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0> +r7i6n2:1370348:1370348 [1] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0> +r7i6n2:1370348:1370348 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:1370349:1370349 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:1370349:1370349 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:1370349:1370349 [2] NCCL INFO Using network IB +r7i6n2:1370348:1370348 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:1370348:1370348 [1] NCCL INFO Using network IB +r7i6n3:610063:610063 [0] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0> +r7i6n3:610066:610066 [3] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0> +r7i6n3:610063:610063 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n3:610066:610066 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n3:610066:610066 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0> +r7i6n3:610066:610066 [3] NCCL INFO Using network IB +r7i6n3:610063:610063 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0> +r7i6n3:610063:610063 [0] NCCL INFO Using network IB +r7i6n2:1370347:1370347 [0] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0> +r7i6n2:1370347:1370347 [0] NCCL INFO NET/Plugin : No plugin 
found (libnccl-net.so), using internal implementation +r7i6n2:1370347:1370347 [0] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:1370347:1370347 [0] NCCL INFO Using network IB +r7i6n3:610065:610065 [2] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0> +r7i6n3:610065:610065 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n3:610064:610064 [1] NCCL INFO Bootstrap : Using ib0:10.148.0.96<0> +r7i6n3:610065:610065 [2] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0> +r7i6n3:610065:610065 [2] NCCL INFO Using network IB +r7i6n3:610064:610064 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n3:610064:610064 [1] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.96<0> +r7i6n3:610064:610064 [1] NCCL INFO Using network IB +r7i6n2:1370350:1370350 [3] NCCL INFO Bootstrap : Using ib0:10.148.0.95<0> +r7i6n2:1370350:1370350 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:1370350:1370350 [3] NCCL INFO NET/IB : Using [0]hfi1_0:1/IB [1]hfi1_1:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:1370350:1370350 [3] NCCL INFO Using network IB +r7i6n3:610064:610133 [1] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] -1/-1/-1->13->12 [2] 14/-1/-1->13->12 [3] -1/-1/-1->13->12 +r7i6n3:610065:610128 [2] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->10 [2] 15/-1/-1->14->13 [3] 15/6/-1->14->-1 +r7i6n3:610066:610123 [3] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] 12/-1/-1->15->14 [2] -1/-1/-1->15->14 [3] 12/-1/-1->15->14 +r6i6n4:257714:257762 [0] NCCL INFO Channel 00/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r6i6n4:257714:257762 [0] NCCL INFO Channel 01/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 +r6i6n4:257714:257762 [0] NCCL INFO Channel 02/04 : 0 1 2 3 4 
5 6 7 8 9 10 11 12 13 14 15 +r6i6n4:257714:257762 [0] NCCL INFO Channel 03/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 +r6i6n4:257714:257762 [0] NCCL INFO Trees [0] 1/8/-1->0->-1 [1] 1/-1/-1->0->3 [2] 1/-1/-1->0->4 [3] 1/-1/-1->0->3 +r6i6n4:257716:257777 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/10/-1->2->-1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->6 +r6i6n4:257715:257767 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] -1/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] -1/-1/-1->1->0 +r7i6n3:610063:610122 [0] NCCL INFO Trees [0] 13/-1/-1->12->8 [1] 13/-1/-1->12->15 [2] 13/4/-1->12->-1 [3] 13/-1/-1->12->15 +r6i6n4:257714:257762 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i6n4:257715:257767 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i6n4:257717:257772 [3] NCCL INFO Trees [0] -1/-1/-1->3->2 [1] 0/-1/-1->3->2 [2] -1/-1/-1->3->2 [3] 0/-1/-1->3->2 +r6i6n4:257716:257777 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n2:1370350:1370452 [3] NCCL INFO Trees [0] -1/-1/-1->11->10 [1] 8/6/-1->11->10 [2] -1/-1/-1->11->10 [3] 8/-1/-1->11->10 +r7i6n2:1370349:1370433 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/14/-1->10->2 [2] 11/-1/-1->10->9 [3] 11/-1/-1->10->7 +r6i6n4:257717:257772 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n2:1370347:1370447 [0] NCCL INFO Trees [0] 9/12/-1->8->0 [1] 9/-1/-1->8->11 [2] 9/-1/-1->8->5 [3] 9/-1/-1->8->11 +r7i6n2:1370348:1370434 [1] NCCL INFO Trees [0] 10/4/-1->9->8 [1] -1/-1/-1->9->8 [2] 10/-1/-1->9->8 [3] -1/-1/-1->9->8 +r6i6n5:378202:378256 [2] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->11 [2] 7/-1/-1->6->5 [3] 7/2/-1->6->14 +r6i6n5:378203:378255 [3] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] 4/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] 4/10/-1->7->6 +r6i6n5:378201:378257 [1] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] -1/-1/-1->5->4 [2] 6/8/-1->5->4 [3] -1/-1/-1->5->4 +r6i6n5:378200:378262 [0] NCCL INFO Trees [0] 5/-1/-1->4->9 [1] 5/-1/-1->4->7 [2] 5/0/-1->4->12 [3] 5/-1/-1->4->7 +r6i6n5:378202:378256 [2] NCCL INFO Setting 
affinity for GPU 2 to ff,fff00000 +r6i6n5:378203:378255 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i6n5:378201:378257 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i6n5:378200:378262 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n3:610064:610133 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n3:610065:610128 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n3:610066:610123 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n3:610063:610122 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n2:1370350:1370452 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n2:1370349:1370433 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n2:1370347:1370447 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n2:1370348:1370434 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n3:610064:610133 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[88000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[88000] via P2P/IPC +r7i6n3:610064:610133 [1] NCCL INFO Channel 02 : 13[1c000] -> 14[88000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 00 : 14[88000] -> 15[8a000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Channel 02 : 1[1c000] -> 2[88000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 6[88000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 00 : 6[88000] -> 7[8a000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 02 : 14[88000] -> 15[8a000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 02 : 6[88000] -> 7[8a000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 15[8a000] -> 0[1a000] [receive] via NET/IB/1 +r6i6n5:378200:378262 [0] NCCL INFO Channel 
00 : 3[8a000] -> 4[1a000] [receive] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [receive] via NET/IB/1 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[88000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 00 : 10[88000] -> 11[8a000] via P2P/IPC +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 02 : 9[1c000] -> 10[88000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 02 : 10[88000] -> 11[8a000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [send] via NET/IB/3 +r7i6n3:610066:610123 [3] NCCL INFO Channel 00 : 15[8a000] -> 0[1a000] [send] via NET/IB/3 +r6i6n5:378203:378255 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [send] via NET/IB/3 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [receive] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 15[8a000] -> 0[1a000] [receive] via NET/IB/1 +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [receive] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [receive] via NET/IB/1 +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [send] via NET/IB/3 +r6i6n4:257717:257772 [3] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [send] via NET/IB/3 +r7i6n3:610066:610123 [3] NCCL INFO Channel 02 : 15[8a000] -> 0[1a000] [send] via NET/IB/3 +r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 1[1c000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r6i6n5:378203:378255 [3] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [send] via NET/IB/3 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [receive] via NET/IB/1 +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [send] via NET/IB/3 +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 9[1c000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] 
-> 9[1c000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 5[1c000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 5[1c000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 13[1c000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 13[1c000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1c000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i6n3:610064:610133 [1] NCCL INFO Channel 01 : 13[1c000] -> 0[1a000] [send] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [receive] via NET/IB/1 +r7i6n3:610064:610133 [1] NCCL INFO Channel 03 : 13[1c000] -> 0[1a000] [send] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1c000] via P2P/IPC +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 8[1a000] -> 9[1c000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [receive] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 13[1c000] -> 0[1a000] [receive] via NET/IB/1 +r6i6n4:257715:257767 [1] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [send] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [receive] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 12[1a000] -> 15[8a000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Channel 03 : 12[1a000] -> 15[8a000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[88000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 03 : 15[8a000] -> 14[88000] via 
P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [receive] via NET/IB/1 +r7i6n3:610066:610123 [3] NCCL INFO Connected all rings +r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 4[1a000] -> 7[8a000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 4[1a000] -> 7[8a000] via P2P/IPC +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 13[1c000] -> 0[1a000] [receive] via NET/IB/1 +r6i6n5:378203:378255 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[88000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [send] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 6[88000] via P2P/IPC +r6i6n5:378203:378255 [3] NCCL INFO Connected all rings +r6i6n4:257717:257772 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Connected all rings +r6i6n5:378200:378262 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1c000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 03 : 4[1a000] -> 5[1c000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Connected all rings +r6i6n4:257717:257772 [3] NCCL INFO Connected all rings +r6i6n4:257716:257777 [2] NCCL INFO Connected all rings +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 8[1a000] -> 11[8a000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 3[8a000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [send] via NET/IB/1 +r7i6n2:1370347:1370447 [0] 
NCCL INFO Channel 03 : 8[1a000] -> 11[8a000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [send] via NET/IB/1 +r6i6n4:257717:257772 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[88000] via P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 03 : 11[8a000] -> 10[88000] via P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Connected all rings +r6i6n5:378201:378257 [1] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [send] via NET/IB/1 +r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 2[88000] -> 6[88000] [send] via NET/IB/3 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [send] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Connected all rings +r6i6n4:257714:257762 [0] NCCL INFO Connected all rings +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1c000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Connected all rings +r6i6n4:257714:257762 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Connected all rings +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 03 : 8[1a000] -> 9[1c000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Connected all rings +r7i6n3:610064:610133 [1] NCCL INFO Connected all rings +r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 7[8a000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i6n2:1370348:1370434 [1] NCCL INFO Connected all rings +r6i6n4:257715:257767 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i6n4:257715:257767 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n3:610063:610122 [0] NCCL INFO 
Channel 03 : 12[1a000] -> 13[1c000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 7[8a000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Connected all rings +r7i6n2:1370349:1370433 [2] NCCL INFO Connected all rings +r6i6n4:257715:257767 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 15[8a000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 11[8a000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 15[8a000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] -> 11[8a000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 01 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i6n3:610064:610133 [1] NCCL INFO Channel 00 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 03 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i6n3:610064:610133 [1] NCCL INFO Channel 01 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i6n3:610064:610133 [1] NCCL INFO Channel 02 : 13[1c000] -> 12[1a000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 0[1a000] -> 4[1a000] [send] via NET/IB/1 +r7i6n3:610064:610133 [1] NCCL INFO Channel 03 : 13[1c000] -> 12[1a000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 0[1a000] -> 4[1a000] [receive] via NET/IB/1 +r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 8[1a000] [send] via NET/IB/1 +r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 10[88000] [send] via NET/IB/3 +r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/1 +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 2[88000] -> 6[88000] [receive] via NET/IB/3 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 5[1c000] -> 8[1a000] [receive] via NET/IB/1 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 4[1a000] -> 9[1c000] [receive] via NET/IB/1 +r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [receive] via 
NET/IB/3 +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 6[88000] -> 11[8a000] [receive] via NET/IB/3 +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 7[8a000] -> 10[88000] [receive] via NET/IB/3 +r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [receive] via NET/IB/1 +r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 4[1a000] -> 9[1c000] [send] via NET/IB/1 +r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 10[88000] -> 2[88000] [receive] via NET/IB/3 +r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 6[88000] -> 11[8a000] [send] via NET/IB/3 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [send] via NET/IB/1 +r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 8[1a000] -> 5[1c000] [receive] via NET/IB/1 +r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 10[88000] -> 7[8a000] [receive] via NET/IB/3 +r6i6n5:378203:378255 [3] NCCL INFO Channel 01 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [send] via NET/IB/3 +r6i6n5:378203:378255 [3] NCCL INFO Channel 03 : 7[8a000] -> 4[1a000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [send] via NET/IB/1 +r6i6n4:257716:257777 [2] NCCL INFO Channel 01 : 2[88000] -> 10[88000] [send] via NET/IB/3 +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 12[1a000] -> 4[1a000] [receive] via NET/IB/1 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 4[1a000] [send] via NET/IB/1 +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 14[88000] -> 6[88000] [receive] via NET/IB/3 +r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 4[1a000] -> 12[1a000] [receive] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [receive] via NET/IB/1 +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 6[88000] [send] via NET/IB/3 +r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 6[88000] -> 14[88000] [receive] via NET/IB/3 +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 
2[88000] -> 10[88000] [receive] via NET/IB/3 +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 12[1a000] [send] via NET/IB/1 +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 14[88000] [send] via NET/IB/3 +r7i6n3:610063:610122 [0] NCCL INFO Channel 02 : 12[1a000] -> 4[1a000] [send] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [send] via NET/IB/1 +r7i6n3:610065:610128 [2] NCCL INFO Channel 03 : 14[88000] -> 6[88000] [send] via NET/IB/3 +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 10[88000] -> 2[88000] [send] via NET/IB/3 +r6i6n5:378200:378262 [0] NCCL INFO Channel 00 : 9[1c000] -> 4[1a000] [receive] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/1 +r6i6n4:257714:257762 [0] NCCL INFO Channel 02 : 4[1a000] -> 0[1a000] [receive] via NET/IB/1 +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/1 +r7i6n3:610065:610128 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [send] via NET/IB/3 +r6i6n5:378202:378256 [2] NCCL INFO Channel 01 : 11[8a000] -> 6[88000] [receive] via NET/IB/3 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 00 : 9[1c000] -> 8[1a000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 03 : 6[88000] -> 2[88000] [receive] via NET/IB/3 +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 01 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [receive] via NET/IB/3 +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 01 : 11[8a000] -> 8[1a000] via P2P/IPC +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 02 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 03 : 11[8a000] -> 8[1a000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[88000] via P2P/IPC +r7i6n2:1370348:1370434 [1] NCCL INFO Channel 03 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i6n3:610066:610123 [3] NCCL INFO Channel 02 : 15[8a000] -> 
14[88000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 00 : 14[88000] -> 13[1c000] via P2P/IPC +r7i6n3:610065:610128 [2] NCCL INFO Channel 02 : 14[88000] -> 13[1c000] via P2P/IPC +r6i6n5:378200:378262 [0] NCCL INFO Channel 02 : 4[1a000] -> 0[1a000] [send] via NET/IB/1 +r7i6n3:610063:610122 [0] NCCL INFO Connected all trees +r7i6n3:610063:610122 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n3:610063:610122 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n3:610066:610123 [3] NCCL INFO Connected all trees +r7i6n3:610066:610123 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n3:610066:610123 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n5:378202:378256 [2] NCCL INFO Channel 03 : 6[88000] -> 2[88000] [send] via NET/IB/3 +r7i6n3:610065:610128 [2] NCCL INFO Connected all trees +r7i6n3:610065:610128 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n3:610065:610128 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:1370347:1370447 [0] NCCL INFO Channel 02 : 8[1a000] -> 5[1c000] [send] via NET/IB/1 +r7i6n3:610064:610133 [1] NCCL INFO Connected all trees +r7i6n3:610064:610133 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n3:610064:610133 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n3:610064:610133 [1] NCCL INFO comm 0x1471e8002fb0 rank 13 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n3:610063:610122 [0] NCCL INFO comm 0x148058002fb0 rank 12 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n3:610066:610123 [3] NCCL INFO comm 0x155220002fb0 rank 15 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n3:610065:610128 [2] NCCL INFO comm 0x1521c8002fb0 rank 14 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 03 : 10[88000] -> 7[8a000] [send] via NET/IB/3 +r6i6n4:257717:257772 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via 
P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[88000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 00 : 6[88000] -> 5[1c000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO Channel 02 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n2:1370350:1370452 [3] NCCL INFO Channel 02 : 11[8a000] -> 10[88000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r6i6n4:257716:257777 [2] NCCL INFO Channel 02 : 2[88000] -> 1[1c000] via P2P/IPC +r6i6n5:378202:378256 [2] NCCL INFO Channel 02 : 6[88000] -> 5[1c000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 00 : 5[1c000] -> 4[1a000] via P2P/IPC +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 00 : 10[88000] -> 9[1c000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 01 : 5[1c000] -> 4[1a000] via P2P/IPC +r6i6n5:378203:378255 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[88000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO Connected all trees +r7i6n2:1370349:1370433 [2] NCCL INFO Channel 02 : 10[88000] -> 9[1c000] via P2P/IPC +r6i6n4:257717:257772 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n4:257717:257772 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n5:378201:378257 [1] NCCL INFO Channel 02 : 5[1c000] -> 4[1a000] via P2P/IPC +r6i6n5:378203:378255 [3] NCCL INFO Channel 02 : 7[8a000] -> 6[88000] via P2P/IPC +r6i6n5:378201:378257 [1] NCCL INFO Channel 03 : 5[1c000] -> 4[1a000] via P2P/IPC +r6i6n4:257714:257762 [0] NCCL INFO Connected all trees +r7i6n2:1370350:1370452 [3] NCCL INFO Connected all trees +r6i6n4:257714:257762 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n4:257714:257762 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n4:257716:257777 [2] NCCL INFO Connected all trees +r6i6n4:257716:257777 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n4:257716:257777 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer 
+r7i6n2:1370350:1370452 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n2:1370350:1370452 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n5:378203:378255 [3] NCCL INFO Connected all trees +r6i6n5:378203:378255 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n5:378203:378255 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n4:257715:257767 [1] NCCL INFO Connected all trees +r6i6n4:257715:257767 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n4:257715:257767 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n4:257714:257762 [0] NCCL INFO comm 0x145844002fb0 rank 0 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r6i6n4:257715:257767 [1] NCCL INFO comm 0x14c6f8002fb0 rank 1 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r6i6n4:257717:257772 [3] NCCL INFO comm 0x149830002fb0 rank 3 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r6i6n4:257716:257777 [2] NCCL INFO comm 0x151a88002fb0 rank 2 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n2:1370349:1370433 [2] NCCL INFO Connected all trees +r7i6n2:1370349:1370433 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n2:1370349:1370433 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:1370347:1370447 [0] NCCL INFO Connected all trees +r7i6n2:1370347:1370447 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r7i6n2:1370347:1370447 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n4:257714:257714 [0] NCCL INFO Launch mode Parallel +r6i6n5:378202:378256 [2] NCCL INFO Connected all trees +r6i6n5:378202:378256 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n5:378202:378256 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:1370348:1370434 [1] NCCL INFO Connected all trees +r7i6n2:1370348:1370434 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 
+r6i6n5:378200:378262 [0] NCCL INFO Connected all trees +r7i6n2:1370348:1370434 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i6n5:378200:378262 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n5:378200:378262 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:1370347:1370447 [0] NCCL INFO comm 0x151020002fb0 rank 8 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r6i6n5:378201:378257 [1] NCCL INFO Connected all trees +r7i6n2:1370348:1370434 [1] NCCL INFO comm 0x14d418002fb0 rank 9 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n2:1370350:1370452 [3] NCCL INFO comm 0x154f28002fb0 rank 11 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r6i6n5:378201:378257 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/512 +r6i6n5:378201:378257 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:1370349:1370433 [2] NCCL INFO comm 0x154d48002fb0 rank 10 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r6i6n5:378200:378262 [0] NCCL INFO comm 0x153408002fb0 rank 4 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r6i6n5:378202:378256 [2] NCCL INFO comm 0x154188002fb0 rank 6 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r6i6n5:378201:378257 [1] NCCL INFO comm 0x14c900002fb0 rank 5 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r6i6n5:378203:378255 [3] NCCL INFO comm 0x14ef58002fb0 rank 7 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +ignore me 6 +14: + duration: 1.1593 sec + algo throughput: 55204675592.2273 bps, 55.2047 Gbps + busbw: 51.7544 Gbps +ignore me 6 +ignore me 6 +15: + duration: 1.2942 sec + algo throughput: 49451976290.1993 bps, 49.4520 Gbps + busbw: 46.3612 Gbps +13: + duration: 1.1545 sec + algo throughput: 55435153048.8659 bps, 55.4352 Gbps + busbw: 51.9705 Gbps +ignore me 6 +12: + duration: 1.2946 sec + algo throughput: 49434318117.6515 bps, 49.4343 Gbps + busbw: 46.3447 Gbps +ignore me 6 +ignore me 6 +9: + duration: 1.4402 sec + algo throughput: 
44438492090.8732 bps, 44.4385 Gbps + busbw: 41.6611 Gbps +10: + duration: 1.4738 sec + algo throughput: 43424520166.0441 bps, 43.4245 Gbps +ignore me 6 + busbw: 40.7105 Gbps +ignore me 6 +ignore me 6 +3: + duration: 1.7691 sec + algo throughput: 36177572497.7145 bps, 36.1776 Gbps + busbw: 33.9165 Gbps +0: +11: + duration: 1.0927 sec + duration: 1.8093 sec + algo throughput: 35371927695.6812 bps, 35.3719 Gbps + busbw: 33.1612 Gbps + algo throughput: 58569704243.7844 bps, 58.5697 Gbps + busbw: 54.9091 Gbps +ignore me 6 +ignore me 6 +5: + duration: 2.0802 sec + algo throughput: 30765780785.6832 bps, 30.7658 Gbps + busbw: 28.8429 Gbps +ignore me 6 +6: + duration: 2.1418 sec + algo throughput: 29880845367.0138 bps, 29.8808 Gbps + busbw: 28.0133 Gbps +ignore me 6 +8: + duration: 1.2561 sec + algo throughput: 50951080615.8564 bps, 50.9511 Gbps + busbw: 47.7666 Gbps +7: + duration: 1.8124 sec + algo throughput: 35312957596.3833 bps, 35.3130 Gbps + busbw: 33.1059 Gbps +ignore me 6 +4: + duration: 1.7526 sec + algo throughput: 36517122206.3803 bps, 36.5171 Gbps + busbw: 34.2348 Gbps +ignore me 6 +1: + duration: 1.8395 sec + algo throughput: 34792737240.4271 bps, 34.7927 Gbps + busbw: 32.6182 Gbps +ignore me 6 +2: + duration: 1.7637 sec + algo throughput: 36287170944.4988 bps, 36.2872 Gbps + busbw: 34.0192 Gbps +ignore me 109 +14: + duration: 0.7080 sec + algo throughput: 90399491760.9001 bps, 90.3995 Gbps + busbw: 84.7495 Gbps +ignore me 109 +15: + duration: 0.7080 sec + algo throughput: 90395163203.6951 bps, 90.3952 Gbps + busbw: 84.7455 Gbps +ignore me 109 +13: + duration: 0.7081 sec + algo throughput: 90382326783.5510 bps, 90.3823 Gbps + busbw: 84.7334 Gbps +ignore me 109 +12: + duration: 0.7080 sec + algo throughput: 90401745663.7657 bps, 90.4017 Gbps + busbw: 84.7516 Gbps +ignore me 109 +9: + duration: 0.7080 sec + algo throughput: 90395783074.5905 bps, 90.3958 Gbps + busbw: 84.7460 Gbps +ignore me 109 +10: + duration: 0.7082 sec + algo throughput: 90374224799.5715 bps, 
90.3742 Gbps + busbw: 84.7258 Gbps +ignore me 109 +0: + duration: 0.7083 sec + algo throughput: 90354374863.7591 bps, 90.3544 Gbps + busbw: 84.7072 Gbps +ignore me 109 +11: + duration: 0.7084 sec + algo throughput: 90343336684.2220 bps, 90.3433 Gbps + busbw: 84.6969 Gbps +ignore me 109 +3: + duration: 0.7087 sec + algo throughput: 90311896434.2268 bps, 90.3119 Gbps + busbw: 84.6674 Gbps +ignore me 109 +8: + duration: 0.7085 sec + algo throughput: 90330088518.1323 bps, 90.3301 Gbps + busbw: 84.6845 Gbps +ignore me 109 +ignore me 109 +2: + duration: 0.7085 sec + algo throughput: 90337030385.0629 bps, 90.3370 Gbps + busbw: 84.6910 Gbps +5: + duration: 0.7088 sec + algo throughput: 90287308758.8899 bps, 90.2873 Gbps + busbw: 84.6444 Gbps +ignore me 109 +ignore me 109 +1: + duration: 0.7089 sec + algo throughput: 90280901515.7927 bps, 90.2809 Gbps + busbw: 84.6383 Gbps +6: + duration: 0.7090 sec + algo throughput: 90270047942.0345 bps, 90.2700 Gbps + busbw: 84.6282 Gbps +ignore me 109 +7: + duration: 0.7090 sec + algo throughput: 90272586091.4933 bps, 90.2726 Gbps + busbw: 84.6305 Gbps +ignore me 109 +4: + duration: 0.7085 sec + algo throughput: 90337161208.6908 bps, 90.3372 Gbps + busbw: 84.6911 Gbps +ignore me 1749 +14: + duration: 0.7107 sec + algo throughput: 90058256584.7650 bps, 90.0583 Gbps + busbw: 84.4296 Gbps +ignore me 1749 +ignore me 1749 +15: + duration: 0.7107 sec + algo throughput: 90057464420.3045 bps, 90.0575 Gbps + busbw: 84.4289 Gbps +13: + duration: 0.7106 sec + algo throughput: 90070702828.5613 bps, 90.0707 Gbps + busbw: 84.4413 Gbps +ignore me 1749 +ignore me 1749 +12: + duration: 0.7106 sec + algo throughput: 90059933061.1509 bps, 90.0599 Gbps + busbw: 84.4312 Gbps +9: + duration: 0.7105 sec + algo throughput: 90071340053.9053 bps, 90.0713 Gbps + busbw: 84.4419 Gbps +ignore me 1749 +10: + duration: 0.7106 sec + algo throughput: 90063253431.3530 bps, 90.0633 Gbps + busbw: 84.4343 Gbps +ignore me 1749 +ignore me 1749 +11: + duration: 0.7106 sec + 
algo throughput: 90065670303.2662 bps, 90.0657 Gbps + busbw: 84.4366 Gbps +0: + duration: 0.7107 sec + algo throughput: 90053334417.7426 bps, 90.0533 Gbps + busbw: 84.4250 Gbps +ignore me 1749 +3: + duration: 0.7106 sec + algo throughput: 90068692693.3661 bps, 90.0687 Gbps + busbw: 84.4394 Gbps +ignore me 1749 +ignore me 1749 +ignore me 1749 +8: + duration: 0.7105 sec +2: + duration: 0.7104 sec + algo throughput: 90072894085.7098 bps, 90.0729 Gbps + busbw: 84.4433 Gbps + algo throughput: 90091360420.7079 bps, 90.0914 Gbps + busbw: 84.4607 Gbps +ignore me 1749 +ignore me 1749 +5: + duration: 0.7104 sec + algo throughput: 90091316675.7603 bps, 90.0913 Gbps + busbw: 84.4606 Gbps +1: + duration: 0.7103 sec + algo throughput: 90101456511.8536 bps, 90.1015 Gbps + busbw: 84.4701 Gbps +6: + duration: 0.7103 sec + algo throughput: 90107024226.3038 bps, 90.1070 Gbps + busbw: 84.4753 Gbps +ignore me 1749 +7: + duration: 0.7103 sec + algo throughput: 90107799997.7677 bps, 90.1078 Gbps + busbw: 84.4761 Gbps +ignore me 1749 +4: + duration: 0.7103 sec + algo throughput: 90102477650.2766 bps, 90.1025 Gbps + busbw: 84.4711 Gbps +ignore me 27986 +14: + duration: 0.7092 sec + algo throughput: 90242129271.5844 bps, 90.2421 Gbps + busbw: 84.6020 Gbps +ignore me 27986 +ignore me 27986 +15: + duration: 0.7093 sec + algo throughput: 90233065038.0259 bps, 90.2331 Gbps + busbw: 84.5935 Gbps +13: + duration: 0.7093 sec + algo throughput: 90226024022.6829 bps, 90.2260 Gbps + busbw: 84.5869 Gbps +ignore me 27986 +12: + duration: 0.7092 sec + algo throughput: 90236901241.3211 bps, 90.2369 Gbps + busbw: 84.5971 Gbps +ignore me 27986 +9: + duration: 0.7093 sec + algo throughput: 90231794012.9985 bps, 90.2318 Gbps + busbw: 84.5923 Gbps +ignore me 27986 +10: + duration: 0.7093 sec + algo throughput: 90224093186.3902 bps, 90.2241 Gbps + busbw: 84.5851 Gbps +ignore me 27986 +ignore me 27986 +0: + duration: 0.7092 sec +11: + duration: 0.7092 sec + algo throughput: 90246123531.0302 bps, 90.2461 Gbps + 
busbw: 84.6057 Gbps + algo throughput: 90237670852.4900 bps, 90.2377 Gbps + busbw: 84.5978 Gbps +ignore me 27986 +3: + duration: 0.7093 sec + algo throughput: 90235789890.2677 bps, 90.2358 Gbps + busbw: 84.5961 Gbps +ignore me 27986 +8: + duration: 0.7092 sec + algo throughput: 90238335770.9699 bps, 90.2383 Gbps + busbw: 84.5984 Gbps +ignore me 27986 +ignore me 27986 +2: + duration: 0.7093 sec + algo throughput: 90223737057.9605 bps, 90.2237 Gbps + busbw: 84.5848 Gbps +ignore me 27986 +ignore me 27986 +5: + duration: 0.7093 sec + algo throughput: 90226816489.8323 bps, 90.2268 Gbps + busbw: 84.5876 Gbps +6: + duration: 0.7093 sec + algo throughput: 90227312447.8407 bps, 90.2273 Gbps + busbw: 84.5881 Gbps +1: + duration: 0.7094 sec + algo throughput: 90222924803.6610 bps, 90.2229 Gbps + busbw: 84.5840 Gbps +ignore me 27986 +7: + duration: 0.7093 sec + algo throughput: 90229254099.1920 bps, 90.2293 Gbps + busbw: 84.5899 Gbps +ignore me 27986 +4: + duration: 0.7094 sec + algo throughput: 90217548148.5392 bps, 90.2175 Gbps + busbw: 84.5790 Gbps +ignore me 447779 +14: + duration: 0.7079 sec + algo throughput: 90401898007.1683 bps, 90.4019 Gbps + busbw: 84.7518 Gbps +ignore me 447779 +13: + duration: 0.7078 sec + algo throughput: 90422510545.5320 bps, 90.4225 Gbps + busbw: 84.7711 Gbps +ignore me 447779 +15: + duration: 0.7080 sec + algo throughput: 90397684358.3370 bps, 90.3977 Gbps + busbw: 84.7478 Gbps +ignore me 447779 +12: + duration: 0.7080 sec + algo throughput: 90398934791.1951 bps, 90.3989 Gbps + busbw: 84.7490 Gbps +ignore me 447779 +10: + duration: 0.7079 sec + algo throughput: 90404439072.1211 bps, 90.4044 Gbps + busbw: 84.7542 Gbps +ignore me 447779 +11: + duration: 0.7078 sec + algo throughput: 90415260229.4886 bps, 90.4153 Gbps + busbw: 84.7643 Gbps +ignore me 447779 +ignore me 447779 +9: + duration: 0.7086 sec + algo throughput: 90317814308.9687 bps, 90.3178 Gbps + busbw: 84.6730 Gbps +0: + duration: 0.7081 sec + algo throughput: 90384670565.8098 bps, 
90.3847 Gbps + busbw: 84.7356 Gbps +ignore me 447779 +8: + duration: 0.7080 sec + algo throughput: 90401729311.5575 bps, 90.4017 Gbps + busbw: 84.7516 Gbps +ignore me 447779 +ignore me 447779 +2: + duration: 0.7081 sec + algo throughput: 90388659575.5084 bps, 90.3887 Gbps + busbw: 84.7394 Gbps +1: + duration: 0.7080 sec + algo throughput: 90397847806.5952 bps, 90.3978 Gbps + busbw: 84.7480 Gbps +ignore me 447779 +ignore me 447779 +3: + duration: 0.7086 sec +6: + duration: 0.7083 sec + algo throughput: 90320233896.8830 bps, 90.3202 Gbps + busbw: 84.6752 Gbps + algo throughput: 90360979559.3395 bps, 90.3610 Gbps + busbw: 84.7134 Gbps +ignore me 447779 +7: + duration: 0.7083 sec + algo throughput: 90360919482.0588 bps, 90.3609 Gbps + busbw: 84.7134 Gbps +ignore me 447779 +5: + duration: 0.7087 sec + algo throughput: 90307672345.7347 bps, 90.3077 Gbps + busbw: 84.6634 Gbps +ignore me 447779 +4: + duration: 0.7085 sec + algo throughput: 90328680753.0585 bps, 90.3287 Gbps + busbw: 84.6831 Gbps diff --git a/experiments/bandwidth/all_reduce_bench-a100-n4.slurm b/experiments/bandwidth/all_reduce_bench-a100-n4.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1811abed60c8a7d7be060b80c41360e53c671beb --- /dev/null +++ b/experiments/bandwidth/all_reduce_bench-a100-n4.slurm @@ -0,0 +1,24 @@ +#!/bin/bash +#SBATCH --job-name=all_reduce_bench-a100-n4 +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +export NNODES=$SLURM_NNODES +export GPUS_PER_NODE=8 +export NCCL_DEBUG=info + +export LOG_FILE=all_reduce_bench-a100-$NNODES.txt + +export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) + +srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.launch --nnodes $NNODES --nproc_per_node $GPUS_PER_NODE --node_rank $SLURM_PROCID --master_addr $MASTER_ADDR --master_port 12345 all_reduce_bench.py' 2>&1 | tee $LOG_FILE diff --git a/experiments/bandwidth/all_reduce_bench-a100-n4.txt b/experiments/bandwidth/all_reduce_bench-a100-n4.txt new file mode 100644 index 0000000000000000000000000000000000000000..7869d0bc72dc503cfebcaeadbc5ce8052e6ce9fd --- /dev/null +++ b/experiments/bandwidth/all_reduce_bench-a100-n4.txt @@ -0,0 +1,1424 @@ +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. 
+If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +/gpfswork/rech/six/commun/conda/hf-prod/lib/python3.8/site-packages/torch/distributed/launch.py:178: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use_env is set by default in torchrun. +If your script expects `--local_rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +WARNING:torch.distributed.run: +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +local_rank: 1 +local_rank: 3 +local_rank: 5 +local_rank: 6 +local_rank: 7 +local_rank: 2 +local_rank: 2 +local_rank: 3 +local_rank: 2 +local_rank: 1 +local_rank: 0 +local_rank: 2 +local_rank: 0 +local_rank: 0 +local_rank: 5 +local_rank: 1 +local_rank: 4 +local_rank: 3 +local_rank: 7 +local_rank: 7 +local_rank: 6 +local_rank: 6 +local_rank: 4 +local_rank: 5 +local_rank: 5 +local_rank: 1 +local_rank: 3 +local_rank: 4 +local_rank: 6 +local_rank: 7 +local_rank: 0 +local_rank: 4 +0 data size: 4.0 GB +1 data size: 4.0 GB +5 data size: 4.0 GB +20 data size: 4.0 GB +30 data size: 4.0 GB +3 data size: 4.0 GB +12 data size: 4.0 GB +21 data size: 4.0 GB +28 data size: 4.0 GB +17 data size: 4.0 GB +2 data size: 4.0 GB +25 data size: 4.0 GB +19 data size: 4.0 GB +22 data size: 4.0 GB +16 data size: 4.0 GB +15 data size: 4.0 GB +26 data size: 4.0 GB +27 data size: 4.0 GB +6 data size: 4.0 GB +24 data size: 4.0 GB +9 data size: 4.0 GB +29 data size: 4.0 GB +23 data size: 4.0 GB +31 data size: 4.0 GB +14 data size: 4.0 GB +7 data size: 4.0 GB +18 data size: 4.0 GB +8 data size: 4.0 GB +11 data size: 4.0 GB +10 data size: 4.0 GB +13 data size: 4.0 GB +4 data size: 4.0 GB +jean-zay-iam37:261379:261379 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261379:261379 [0] NCCL INFO 
NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261379:261379 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261379:261379 [0] NCCL INFO Using network IB +NCCL version 2.10.3+cuda11.3 +jean-zay-iam37:261380:261380 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261383:261383 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261384:261384 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261386:261386 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261381:261381 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261382:261382 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam41:276753:276753 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276748:276748 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276747:276747 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276752:276752 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276750:276750 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276751:276751 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276746:276746 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam41:276749:276749 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.206<0> +jean-zay-iam37:261384:261384 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263016:263016 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263017:263017 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263018:263018 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263021:263021 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263019:263019 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> 
+jean-zay-iam52:263015:263015 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263020:263020 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam52:263022:263022 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.217<0> +jean-zay-iam37:261386:261386 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261381:261381 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261380:261380 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261383:261383 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289973:289973 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289967:289967 [0] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289969:289969 [2] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289971:289971 [4] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289970:289970 [3] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289972:289972 [5] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289968:289968 [1] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam40:289974:289974 [7] NCCL INFO Bootstrap : Using ib0:10.148.8.205<0> +jean-zay-iam37:261382:261382 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276751:276751 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276749:276749 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276746:276746 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276753:276753 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation 
+jean-zay-iam41:276748:276748 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276752:276752 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276747:276747 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam41:276750:276750 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263017:263017 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263016:263016 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289968:289968 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289974:289974 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263021:263021 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263015:263015 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263019:263019 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263020:263020 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289973:289973 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263018:263018 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam52:263022:263022 [7] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289972:289972 [5] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289969:289969 [2] NCCL INFO NET/Plugin : No 
plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289971:289971 [4] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289967:289967 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam40:289970:289970 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261384:261384 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261384:261384 [5] NCCL INFO Using network IB +jean-zay-iam37:261380:261380 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261380:261380 [1] NCCL INFO Using network IB +jean-zay-iam37:261383:261383 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261381:261381 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261386:261386 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261381:261381 [2] NCCL INFO Using network IB +jean-zay-iam37:261383:261383 [4] NCCL INFO Using network IB +jean-zay-iam37:261386:261386 [7] NCCL INFO Using network IB +jean-zay-iam37:261382:261382 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261382:261382 [3] NCCL INFO Using network IB +jean-zay-iam41:276751:276751 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276751:276751 [5] NCCL INFO Using network IB +jean-zay-iam41:276748:276748 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB 
ib0:10.148.8.206<0> +jean-zay-iam41:276748:276748 [2] NCCL INFO Using network IB +jean-zay-iam41:276747:276747 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276747:276747 [1] NCCL INFO Using network IB +jean-zay-iam41:276752:276752 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276746:276746 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276749:276749 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276753:276753 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276749:276749 [3] NCCL INFO Using network IB +jean-zay-iam41:276750:276750 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.206<0> +jean-zay-iam41:276746:276746 [0] NCCL INFO Using network IB +jean-zay-iam41:276753:276753 [7] NCCL INFO Using network IB +jean-zay-iam41:276752:276752 [6] NCCL INFO Using network IB +jean-zay-iam41:276750:276750 [4] NCCL INFO Using network IB +jean-zay-iam52:263022:263022 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263022:263022 [7] NCCL INFO Using network IB +jean-zay-iam52:263019:263019 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263019:263019 [4] NCCL INFO Using network IB +jean-zay-iam52:263021:263021 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263021:263021 [6] NCCL INFO Using network IB +jean-zay-iam52:263017:263017 [2] NCCL INFO NET/IB : Using 
[0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263017:263017 [2] NCCL INFO Using network IB +jean-zay-iam52:263020:263020 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263020:263020 [5] NCCL INFO Using network IB +jean-zay-iam52:263015:263015 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263018:263018 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263015:263015 [0] NCCL INFO Using network IB +jean-zay-iam52:263018:263018 [3] NCCL INFO Using network IB +jean-zay-iam52:263016:263016 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.217<0> +jean-zay-iam52:263016:263016 [1] NCCL INFO Using network IB +jean-zay-iam40:289968:289968 [1] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289968:289968 [1] NCCL INFO Using network IB +jean-zay-iam40:289972:289972 [5] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289972:289972 [5] NCCL INFO Using network IB +jean-zay-iam40:289969:289969 [2] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289969:289969 [2] NCCL INFO Using network IB +jean-zay-iam40:289973:289973 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289973:289973 [6] NCCL INFO Using network IB +jean-zay-iam40:289970:289970 [3] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289970:289970 [3] NCCL INFO Using network 
IB +jean-zay-iam40:289967:289967 [0] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289967:289967 [0] NCCL INFO Using network IB +jean-zay-iam40:289974:289974 [7] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289971:289971 [4] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.205<0> +jean-zay-iam40:289974:289974 [7] NCCL INFO Using network IB +jean-zay-iam40:289971:289971 [4] NCCL INFO Using network IB +jean-zay-iam37:261385:261385 [6] NCCL INFO Bootstrap : Using ib0:10.148.8.203<0> +jean-zay-iam37:261385:261385 [6] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +jean-zay-iam37:261385:261385 [6] NCCL INFO NET/IB : Using [0]hfi1_1:1/IB [1]hfi1_0:1/IB [2]hfi1_2:1/IB [3]hfi1_3:1/IB ; OOB ib0:10.148.8.203<0> +jean-zay-iam37:261385:261385 [6] NCCL INFO Using network IB +jean-zay-iam52:263016:263142 [1] NCCL INFO Trees [0] 26/-1/-1->25->24 [1] 26/-1/-1->25->24 +jean-zay-iam52:263016:263142 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff +jean-zay-iam52:263017:263138 [2] NCCL INFO Trees [0] 27/-1/-1->26->25 [1] 27/-1/-1->26->25 +jean-zay-iam52:263017:263138 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff +jean-zay-iam52:263018:263141 [3] NCCL INFO Trees [0] 28/-1/-1->27->26 [1] 28/-1/-1->27->26 +jean-zay-iam52:263018:263141 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff +jean-zay-iam52:263021:263137 [6] NCCL INFO Trees [0] 31/-1/-1->30->29 [1] 31/-1/-1->30->29 +jean-zay-iam52:263020:263139 [5] NCCL INFO Trees [0] 30/-1/-1->29->28 [1] 30/-1/-1->29->28 +jean-zay-iam52:263021:263137 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000 +jean-zay-iam52:263020:263139 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000 +jean-zay-iam52:263019:263136 [4] NCCL INFO Trees [0] 29/-1/-1->28->27 [1] 
29/-1/-1->28->27 +jean-zay-iam52:263022:263135 [7] NCCL INFO Trees [0] -1/-1/-1->31->30 [1] -1/-1/-1->31->30 +jean-zay-iam52:263019:263136 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000 +jean-zay-iam52:263022:263135 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000 +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +jean-zay-iam37:261381:261500 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +jean-zay-iam37:261379:261471 [0] NCCL INFO Trees [0] 1/16/-1->0->-1 [1] 1/-1/-1->0->8 +jean-zay-iam37:261380:261497 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 +jean-zay-iam37:261381:261500 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff +jean-zay-iam37:261379:261471 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff +jean-zay-iam37:261380:261497 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff +jean-zay-iam41:276749:276869 [3] NCCL INFO Trees [0] 20/-1/-1->19->18 [1] 20/-1/-1->19->18 +jean-zay-iam41:276751:276865 [5] NCCL INFO Trees [0] 22/-1/-1->21->20 [1] 22/-1/-1->21->20 +jean-zay-iam41:276749:276869 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff +jean-zay-iam41:276750:276871 [4] NCCL INFO Trees [0] 21/-1/-1->20->19 [1] 21/-1/-1->20->19 +jean-zay-iam41:276752:276872 [6] NCCL INFO Trees [0] 23/-1/-1->22->21 [1] 23/-1/-1->22->21 +jean-zay-iam52:263015:263140 [0] NCCL INFO Trees [0] 25/-1/-1->24->16 [1] 25/8/-1->24->-1 +jean-zay-iam52:263015:263140 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff +jean-zay-iam40:289974:290092 [7] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] -1/-1/-1->15->14 +jean-zay-iam41:276751:276865 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000 +jean-zay-iam41:276750:276871 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000 +jean-zay-iam41:276752:276872 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000 
+jean-zay-iam41:276753:276870 [7] NCCL INFO Trees [0] -1/-1/-1->23->22 [1] -1/-1/-1->23->22 +jean-zay-iam41:276753:276870 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000 +jean-zay-iam40:289974:290092 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000 +jean-zay-iam41:276747:276867 [1] NCCL INFO Trees [0] 18/8/-1->17->16 [1] 18/-1/-1->17->16 +jean-zay-iam41:276748:276866 [2] NCCL INFO Trees [0] 19/-1/-1->18->17 [1] 19/-1/-1->18->17 +jean-zay-iam41:276746:276868 [0] NCCL INFO Trees [0] 17/24/-1->16->0 [1] 17/-1/-1->16->9 +jean-zay-iam41:276748:276866 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff +jean-zay-iam41:276746:276868 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff +jean-zay-iam41:276747:276867 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff +jean-zay-iam37:261382:261501 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 +jean-zay-iam37:261382:261501 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff +jean-zay-iam37:261383:261499 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 +jean-zay-iam37:261383:261499 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000 +jean-zay-iam37:261385:261506 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 +jean-zay-iam37:261385:261506 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000 +jean-zay-iam37:261384:261496 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 +jean-zay-iam37:261384:261496 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000 +jean-zay-iam37:261386:261498 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 +jean-zay-iam37:261386:261498 [7] NCCL INFO Setting affinity for GPU 7 to ff,00000000 +jean-zay-iam40:289967:290091 [0] NCCL INFO Trees [0] 9/-1/-1->8->17 [1] 9/0/-1->8->24 +jean-zay-iam40:289967:290091 [0] NCCL INFO Setting affinity for GPU 0 to ffffffff +jean-zay-iam40:289969:290087 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/-1/-1->10->9 +jean-zay-iam40:289970:290090 [3] NCCL INFO Trees [0] 12/-1/-1->11->10 [1] 12/-1/-1->11->10 
+jean-zay-iam40:289968:290086 [1] NCCL INFO Trees [0] 10/-1/-1->9->8 [1] 10/16/-1->9->8 +jean-zay-iam40:289973:290089 [6] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->13 +jean-zay-iam40:289969:290087 [2] NCCL INFO Setting affinity for GPU 2 to ffffffff +jean-zay-iam40:289970:290090 [3] NCCL INFO Setting affinity for GPU 3 to ffffffff +jean-zay-iam40:289973:290089 [6] NCCL INFO Setting affinity for GPU 6 to ff,00000000 +jean-zay-iam40:289968:290086 [1] NCCL INFO Setting affinity for GPU 1 to ffffffff +jean-zay-iam40:289972:290088 [5] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] 14/-1/-1->13->12 +jean-zay-iam40:289971:290093 [4] NCCL INFO Trees [0] 13/-1/-1->12->11 [1] 13/-1/-1->12->11 +jean-zay-iam40:289972:290088 [5] NCCL INFO Setting affinity for GPU 5 to ff,00000000 +jean-zay-iam40:289971:290093 [4] NCCL INFO Setting affinity for GPU 4 to ff,00000000 +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 31[cb000] -> 0[7000] [receive] via NET/IB/1 +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 7[cb000] -> 8[7000] [receive] via NET/IB/1 +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 23[cb000] -> 24[7000] [receive] via NET/IB/1 +jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 00 : 25[b000] -> 26[48000] via P2P/IPC/read +jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 00 : 26[48000] -> 27[4c000] via P2P/IPC/read +jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 00 : 27[4c000] -> 28[88000] via P2P/IPC/read +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 15[cb000] -> 16[7000] [receive] via NET/IB/1 +jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 00 : 29[8b000] -> 30[c8000] via P2P/IPC/read +jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 00 : 15[cb000] -> 16[7000] [send] via NET/IB/3 +jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 00 : 22[c8000] -> 23[cb000] via P2P/IPC/read +jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 00 : 28[88000] -> 29[8b000] via P2P/IPC/read +jean-zay-iam52:263021:263137 [6] NCCL INFO 
Channel 00 : 30[c8000] -> 31[cb000] via P2P/IPC/read +jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 01 : 25[b000] -> 26[48000] via P2P/IPC/read +jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 00 : 20[88000] -> 21[8b000] via P2P/IPC/read +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 23[cb000] -> 24[7000] [receive] via NET/IB/1 +jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 01 : 26[48000] -> 27[4c000] via P2P/IPC/read +jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 00 : 21[8b000] -> 22[c8000] via P2P/IPC/read +jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 00 : 31[cb000] -> 0[7000] [send] via NET/IB/3 +jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 00 : 19[4c000] -> 20[88000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 31[cb000] -> 0[7000] [receive] via NET/IB/1 +jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 00 : 2[48000] -> 3[4c000] via P2P/IPC/read +jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 01 : 27[4c000] -> 28[88000] via P2P/IPC/read +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 18[48000] via P2P/IPC/read +jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 00 : 18[48000] -> 19[4c000] via P2P/IPC/read +jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 01 : 29[8b000] -> 30[c8000] via P2P/IPC/read +jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 00 : 1[b000] -> 2[48000] via P2P/IPC/read +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 15[cb000] -> 16[7000] [receive] via NET/IB/1 +jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 00 : 3[4c000] -> 4[88000] via P2P/IPC/read +jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 00 : 6[c8000] -> 7[cb000] via P2P/IPC/read +jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 01 : 22[c8000] -> 23[cb000] via P2P/IPC/read +jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 01 : 28[88000] -> 29[8b000] via P2P/IPC/read +jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 01 : 30[c8000] -> 31[cb000] via P2P/IPC/read 
+jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 7[cb000] -> 8[7000] [receive] via NET/IB/1 +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 00 : 9[b000] -> 10[48000] via P2P/IPC/read +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 24[7000] -> 25[b000] via P2P/IPC/read +jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 00 : 23[cb000] -> 24[7000] [send] via NET/IB/3 +jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 01 : 20[88000] -> 21[8b000] via P2P/IPC/read +jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 00 : 11[4c000] -> 12[88000] via P2P/IPC/read +jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 00 : 10[48000] -> 11[4c000] via P2P/IPC/read +jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 00 : 14[c8000] -> 15[cb000] via P2P/IPC/read +jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 00 : 13[8b000] -> 14[c8000] via P2P/IPC/read +jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 00 : 7[cb000] -> 8[7000] [send] via NET/IB/3 +jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 01 : 21[8b000] -> 22[c8000] via P2P/IPC/read +jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 01 : 19[4c000] -> 20[88000] via P2P/IPC/read +jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 01 : 15[cb000] -> 16[7000] [send] via NET/IB/3 +jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 00 : 12[88000] -> 13[8b000] via P2P/IPC/read +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 01 : 17[b000] -> 18[48000] via P2P/IPC/read +jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 01 : 18[48000] -> 19[4c000] via P2P/IPC/read +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 17[b000] via P2P/IPC/read +jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 00 : 4[88000] -> 5[8b000] via P2P/IPC/read +jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 01 : 31[cb000] -> 0[7000] [send] via NET/IB/3 +jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 00 : 5[8b000] -> 6[c8000] via P2P/IPC/read +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 
9[b000] -> 10[48000] via P2P/IPC/read +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 8[7000] -> 9[b000] via P2P/IPC/read +jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 01 : 11[4c000] -> 12[88000] via P2P/IPC/read +jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 01 : 10[48000] -> 11[4c000] via P2P/IPC/read +jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 01 : 13[8b000] -> 14[c8000] via P2P/IPC/read +jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 01 : 14[c8000] -> 15[cb000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 0[7000] -> 1[b000] via P2P/IPC/read +jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 01 : 2[48000] -> 3[4c000] via P2P/IPC/read +jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 01 : 12[88000] -> 13[8b000] via P2P/IPC/read +jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 01 : 1[b000] -> 2[48000] via P2P/IPC/read +jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 01 : 3[4c000] -> 4[88000] via P2P/IPC/read +jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 01 : 6[c8000] -> 7[cb000] via P2P/IPC/read +jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 01 : 7[cb000] -> 8[7000] [send] via NET/IB/3 +jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 01 : 23[cb000] -> 24[7000] [send] via NET/IB/3 +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 24[7000] -> 25[b000] via P2P/IPC/read +jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 01 : 4[88000] -> 5[8b000] via P2P/IPC/read +jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 01 : 5[8b000] -> 6[c8000] via P2P/IPC/read +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 9[b000] via P2P/IPC/read +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 16[7000] -> 17[b000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 0[7000] -> 1[b000] via P2P/IPC/read +jean-zay-iam52:263017:263138 [2] NCCL INFO Connected all rings +jean-zay-iam41:276753:276870 [7] NCCL INFO Connected all rings 
+jean-zay-iam52:263018:263141 [3] NCCL INFO Connected all rings +jean-zay-iam52:263015:263140 [0] NCCL INFO Connected all rings +jean-zay-iam52:263019:263136 [4] NCCL INFO Connected all rings +jean-zay-iam37:261386:261498 [7] NCCL INFO Connected all rings +jean-zay-iam41:276751:276865 [5] NCCL INFO Connected all rings +jean-zay-iam40:289967:290091 [0] NCCL INFO Connected all rings +jean-zay-iam52:263020:263139 [5] NCCL INFO Connected all rings +jean-zay-iam40:289974:290092 [7] NCCL INFO Connected all rings +jean-zay-iam41:276750:276871 [4] NCCL INFO Connected all rings +jean-zay-iam41:276746:276868 [0] NCCL INFO Connected all rings +jean-zay-iam52:263022:263135 [7] NCCL INFO Connected all rings +jean-zay-iam37:261379:261471 [0] NCCL INFO Connected all rings +jean-zay-iam41:276748:276866 [2] NCCL INFO Connected all rings +jean-zay-iam41:276749:276869 [3] NCCL INFO Connected all rings +jean-zay-iam52:263021:263137 [6] NCCL INFO Connected all rings +jean-zay-iam40:289973:290089 [6] NCCL INFO Connected all rings +jean-zay-iam40:289969:290087 [2] NCCL INFO Connected all rings +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 16[7000] -> 24[7000] [receive] via NET/IB/1 +jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 00 : 23[cb000] -> 22[c8000] via P2P/IPC/read +jean-zay-iam37:261381:261500 [2] NCCL INFO Connected all rings +jean-zay-iam40:289972:290088 [5] NCCL INFO Connected all rings +jean-zay-iam52:263016:263142 [1] NCCL INFO Connected all rings +jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 00 : 31[cb000] -> 30[c8000] via P2P/IPC/read +jean-zay-iam41:276752:276872 [6] NCCL INFO Connected all rings +jean-zay-iam40:289970:290090 [3] NCCL INFO Connected all rings +jean-zay-iam40:289971:290093 [4] NCCL INFO Connected all rings +jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 00 : 7[cb000] -> 6[c8000] via P2P/IPC/read +jean-zay-iam37:261382:261501 [3] NCCL INFO Connected all rings +jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 00 : 15[cb000] -> 
14[c8000] via P2P/IPC/read +jean-zay-iam41:276747:276867 [1] NCCL INFO Connected all rings +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 0[7000] -> 8[7000] [receive] via NET/IB/1 +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 9[b000] -> 16[7000] [receive] via NET/IB/1 +jean-zay-iam37:261384:261496 [5] NCCL INFO Connected all rings +jean-zay-iam37:261383:261499 [4] NCCL INFO Connected all rings +jean-zay-iam40:289968:290086 [1] NCCL INFO Connected all rings +jean-zay-iam37:261385:261506 [6] NCCL INFO Connected all rings +jean-zay-iam52:263022:263135 [7] NCCL INFO Channel 01 : 31[cb000] -> 30[c8000] via P2P/IPC/read +jean-zay-iam41:276753:276870 [7] NCCL INFO Channel 01 : 23[cb000] -> 22[c8000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 0[7000] -> 8[7000] [send] via NET/IB/1 +jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 00 : 26[48000] -> 25[b000] via P2P/IPC/read +jean-zay-iam37:261380:261497 [1] NCCL INFO Connected all rings +jean-zay-iam40:289974:290092 [7] NCCL INFO Channel 01 : 15[cb000] -> 14[c8000] via P2P/IPC/read +jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 00 : 27[4c000] -> 26[48000] via P2P/IPC/read +jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 00 : 28[88000] -> 27[4c000] via P2P/IPC/read +jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 00 : 29[8b000] -> 28[88000] via P2P/IPC/read +jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 00 : 21[8b000] -> 20[88000] via P2P/IPC/read +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 8[7000] -> 17[b000] [receive] via NET/IB/1 +jean-zay-iam37:261386:261498 [7] NCCL INFO Channel 01 : 7[cb000] -> 6[c8000] via P2P/IPC/read +jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 00 : 20[88000] -> 19[4c000] via P2P/IPC/read +jean-zay-iam52:263017:263138 [2] NCCL INFO Channel 01 : 26[48000] -> 25[b000] via P2P/IPC/read +jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 00 : 30[c8000] -> 29[8b000] via P2P/IPC/read +jean-zay-iam41:276748:276866 [2] 
NCCL INFO Channel 00 : 18[48000] -> 17[b000] via P2P/IPC/read +jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 00 : 19[4c000] -> 18[48000] via P2P/IPC/read +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 9[b000] -> 16[7000] [send] via NET/IB/1 +jean-zay-iam52:263018:263141 [3] NCCL INFO Channel 01 : 27[4c000] -> 26[48000] via P2P/IPC/read +jean-zay-iam52:263019:263136 [4] NCCL INFO Channel 01 : 28[88000] -> 27[4c000] via P2P/IPC/read +jean-zay-iam52:263020:263139 [5] NCCL INFO Channel 01 : 29[8b000] -> 28[88000] via P2P/IPC/read +jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 00 : 25[b000] -> 24[7000] via P2P/IPC/read +jean-zay-iam41:276751:276865 [5] NCCL INFO Channel 01 : 21[8b000] -> 20[88000] via P2P/IPC/read +jean-zay-iam41:276750:276871 [4] NCCL INFO Channel 01 : 20[88000] -> 19[4c000] via P2P/IPC/read +jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 00 : 22[c8000] -> 21[8b000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 16[7000] -> 0[7000] [receive] via NET/IB/1 +jean-zay-iam41:276748:276866 [2] NCCL INFO Channel 01 : 18[48000] -> 17[b000] via P2P/IPC/read +jean-zay-iam41:276749:276869 [3] NCCL INFO Channel 01 : 19[4c000] -> 18[48000] via P2P/IPC/read +jean-zay-iam52:263021:263137 [6] NCCL INFO Channel 01 : 30[c8000] -> 29[8b000] via P2P/IPC/read +jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 00 : 14[c8000] -> 13[8b000] via P2P/IPC/read +jean-zay-iam41:276752:276872 [6] NCCL INFO Channel 01 : 22[c8000] -> 21[8b000] via P2P/IPC/read +jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 00 : 10[48000] -> 9[b000] via P2P/IPC/read +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 8[7000] -> 17[b000] [send] via NET/IB/1 +jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 00 : 11[4c000] -> 10[48000] via P2P/IPC/read +jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 00 : 13[8b000] -> 12[88000] via P2P/IPC/read +jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 00 : 12[88000] -> 11[4c000] via P2P/IPC/read 
+jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 00 : 2[48000] -> 1[b000] via P2P/IPC/read +jean-zay-iam52:263016:263142 [1] NCCL INFO Channel 01 : 25[b000] -> 24[7000] via P2P/IPC/read +jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 00 : 3[4c000] -> 2[48000] via P2P/IPC/read +jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 00 : 6[c8000] -> 5[8b000] via P2P/IPC/read +jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 00 : 5[8b000] -> 4[88000] via P2P/IPC/read +jean-zay-iam40:289973:290089 [6] NCCL INFO Channel 01 : 14[c8000] -> 13[8b000] via P2P/IPC/read +jean-zay-iam40:289969:290087 [2] NCCL INFO Channel 01 : 10[48000] -> 9[b000] via P2P/IPC/read +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 16[7000] -> 9[b000] [receive] via NET/IB/1 +jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 00 : 4[88000] -> 3[4c000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 00 : 0[7000] -> 16[7000] [send] via NET/IB/1 +jean-zay-iam40:289970:290090 [3] NCCL INFO Channel 01 : 11[4c000] -> 10[48000] via P2P/IPC/read +jean-zay-iam40:289972:290088 [5] NCCL INFO Channel 01 : 13[8b000] -> 12[88000] via P2P/IPC/read +jean-zay-iam40:289971:290093 [4] NCCL INFO Channel 01 : 12[88000] -> 11[4c000] via P2P/IPC/read +jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 00 : 1[b000] -> 0[7000] via P2P/IPC/read +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 24[7000] -> 8[7000] [receive] via NET/IB/1 +jean-zay-iam37:261381:261500 [2] NCCL INFO Channel 01 : 2[48000] -> 1[b000] via P2P/IPC/read +jean-zay-iam37:261382:261501 [3] NCCL INFO Channel 01 : 3[4c000] -> 2[48000] via P2P/IPC/read +jean-zay-iam37:261385:261506 [6] NCCL INFO Channel 01 : 6[c8000] -> 5[8b000] via P2P/IPC/read +jean-zay-iam37:261384:261496 [5] NCCL INFO Channel 01 : 5[8b000] -> 4[88000] via P2P/IPC/read +jean-zay-iam37:261383:261499 [4] NCCL INFO Channel 01 : 4[88000] -> 3[4c000] via P2P/IPC/read +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 24[7000] 
[send] via NET/IB/1 +jean-zay-iam37:261380:261497 [1] NCCL INFO Channel 01 : 1[b000] -> 0[7000] via P2P/IPC/read +jean-zay-iam41:276753:276870 [7] NCCL INFO Connected all trees +jean-zay-iam41:276753:276870 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276753:276870 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263022:263135 [7] NCCL INFO Connected all trees +jean-zay-iam52:263022:263135 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263022:263135 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 8[7000] [send] via NET/IB/1 +jean-zay-iam40:289974:290092 [7] NCCL INFO Connected all trees +jean-zay-iam40:289974:290092 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289974:290092 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263018:263141 [3] NCCL INFO Connected all trees +jean-zay-iam52:263018:263141 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263018:263141 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 24[7000] [send] via NET/IB/1 +jean-zay-iam52:263019:263136 [4] NCCL INFO Connected all trees +jean-zay-iam52:263019:263136 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263019:263136 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261386:261498 [7] NCCL INFO Connected all trees +jean-zay-iam37:261386:261498 [7] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261386:261498 [7] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276750:276871 [4] NCCL INFO Connected all trees +jean-zay-iam41:276750:276871 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 
+jean-zay-iam41:276750:276871 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 0[7000] -> 16[7000] [receive] via NET/IB/1 +jean-zay-iam41:276749:276869 [3] NCCL INFO Connected all trees +jean-zay-iam41:276749:276869 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276749:276869 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276752:276872 [6] NCCL INFO Connected all trees +jean-zay-iam41:276752:276872 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263021:263137 [6] NCCL INFO Connected all trees +jean-zay-iam41:276752:276872 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263021:263137 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263021:263137 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276751:276865 [5] NCCL INFO Connected all trees +jean-zay-iam41:276751:276865 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276751:276865 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 8[7000] -> 24[7000] [receive] via NET/IB/1 +jean-zay-iam52:263020:263139 [5] NCCL INFO Connected all trees +jean-zay-iam52:263020:263139 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263020:263139 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263017:263138 [2] NCCL INFO Connected all trees +jean-zay-iam52:263017:263138 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263017:263138 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289973:290089 [6] NCCL INFO Connected all trees +jean-zay-iam40:289973:290089 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 
+jean-zay-iam40:289973:290089 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289970:290090 [3] NCCL INFO Connected all trees +jean-zay-iam40:289970:290090 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289970:290090 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 16[7000] -> 0[7000] [send] via NET/IB/1 +jean-zay-iam40:289971:290093 [4] NCCL INFO Connected all trees +jean-zay-iam40:289971:290093 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289971:290093 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289972:290088 [5] NCCL INFO Connected all trees +jean-zay-iam40:289972:290088 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289972:290088 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 01 : 24[7000] -> 8[7000] [send] via NET/IB/1 +jean-zay-iam37:261385:261506 [6] NCCL INFO Connected all trees +jean-zay-iam37:261385:261506 [6] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261385:261506 [6] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261382:261501 [3] NCCL INFO Connected all trees +jean-zay-iam37:261382:261501 [3] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261382:261501 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261383:261499 [4] NCCL INFO Connected all trees +jean-zay-iam37:261383:261499 [4] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261383:261499 [4] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261384:261496 [5] NCCL INFO Connected all trees +jean-zay-iam37:261384:261496 [5] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 
+jean-zay-iam37:261384:261496 [5] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 00 : 24[7000] -> 16[7000] [receive] via NET/IB/1 +jean-zay-iam37:261381:261500 [2] NCCL INFO Connected all trees +jean-zay-iam37:261381:261500 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261381:261500 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261379:261471 [0] NCCL INFO Channel 01 : 8[7000] -> 0[7000] [receive] via NET/IB/1 +jean-zay-iam52:263015:263140 [0] NCCL INFO Channel 00 : 24[7000] -> 16[7000] [send] via NET/IB/1 +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 00 : 17[b000] -> 8[7000] [receive] via NET/IB/1 +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 00 : 17[b000] -> 16[7000] via P2P/IPC/read +jean-zay-iam41:276747:276867 [1] NCCL INFO Channel 01 : 17[b000] -> 16[7000] via P2P/IPC/read +jean-zay-iam52:263015:263140 [0] NCCL INFO Connected all trees +jean-zay-iam52:263015:263140 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263015:263140 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289967:290091 [0] NCCL INFO Channel 01 : 8[7000] -> 0[7000] [send] via NET/IB/1 +jean-zay-iam41:276746:276868 [0] NCCL INFO Channel 01 : 16[7000] -> 9[b000] [send] via NET/IB/1 +jean-zay-iam52:263016:263142 [1] NCCL INFO Connected all trees +jean-zay-iam52:263016:263142 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam52:263016:263142 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam52:263019:263136 [4] NCCL INFO comm 0x14b1a8002fb0 rank 28 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE +jean-zay-iam52:263020:263139 [5] NCCL INFO comm 0x151418002fb0 rank 29 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE +jean-zay-iam52:263016:263142 [1] NCCL INFO comm 0x145588002fb0 rank 25 nranks 32 cudaDev 1 busId b000 - Init 
COMPLETE +jean-zay-iam52:263015:263140 [0] NCCL INFO comm 0x14c858002fb0 rank 24 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE +jean-zay-iam52:263017:263138 [2] NCCL INFO comm 0x14e858002fb0 rank 26 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE +jean-zay-iam52:263018:263141 [3] NCCL INFO comm 0x150208002fb0 rank 27 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE +jean-zay-iam52:263021:263137 [6] NCCL INFO comm 0x151df8002fb0 rank 30 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE +jean-zay-iam52:263022:263135 [7] NCCL INFO comm 0x152728002fb0 rank 31 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE +jean-zay-iam41:276748:276866 [2] NCCL INFO Connected all trees +jean-zay-iam41:276748:276866 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276748:276866 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 00 : 9[b000] -> 8[7000] via P2P/IPC/read +jean-zay-iam40:289968:290086 [1] NCCL INFO Channel 01 : 9[b000] -> 8[7000] via P2P/IPC/read +jean-zay-iam37:261379:261471 [0] NCCL INFO Connected all trees +jean-zay-iam37:261379:261471 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261379:261471 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276746:276868 [0] NCCL INFO Connected all trees +jean-zay-iam41:276746:276868 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276746:276868 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261380:261497 [1] NCCL INFO Connected all trees +jean-zay-iam37:261380:261497 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam37:261380:261497 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam37:261380:261497 [1] NCCL INFO comm 0x151790002fb0 rank 1 nranks 32 cudaDev 1 busId b000 - Init COMPLETE +jean-zay-iam37:261379:261471 [0] NCCL INFO comm 0x151f24002fb0 rank 0 
nranks 32 cudaDev 0 busId 7000 - Init COMPLETE +jean-zay-iam37:261382:261501 [3] NCCL INFO comm 0x14a538002fb0 rank 3 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE +jean-zay-iam37:261381:261500 [2] NCCL INFO comm 0x151028002fb0 rank 2 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE +jean-zay-iam37:261383:261499 [4] NCCL INFO comm 0x152340002fb0 rank 4 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE +jean-zay-iam37:261384:261496 [5] NCCL INFO comm 0x14d048002fb0 rank 5 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE +jean-zay-iam37:261379:261379 [0] NCCL INFO Launch mode Parallel +jean-zay-iam37:261386:261498 [7] NCCL INFO comm 0x1519b0002fb0 rank 7 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE +jean-zay-iam37:261385:261506 [6] NCCL INFO comm 0x14bd98002fb0 rank 6 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE +jean-zay-iam41:276747:276867 [1] NCCL INFO Connected all trees +jean-zay-iam41:276747:276867 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam41:276747:276867 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276749:276869 [3] NCCL INFO comm 0x14d508002fb0 rank 19 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE +jean-zay-iam40:289967:290091 [0] NCCL INFO Connected all trees +jean-zay-iam41:276748:276866 [2] NCCL INFO comm 0x14ae78002fb0 rank 18 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE +jean-zay-iam40:289967:290091 [0] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289967:290091 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam41:276747:276867 [1] NCCL INFO comm 0x14d928002fb0 rank 17 nranks 32 cudaDev 1 busId b000 - Init COMPLETE +jean-zay-iam41:276750:276871 [4] NCCL INFO comm 0x146d68002fb0 rank 20 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE +jean-zay-iam41:276753:276870 [7] NCCL INFO comm 0x1523f8002fb0 rank 23 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE +jean-zay-iam41:276746:276868 [0] NCCL INFO comm 0x152f60002fb0 
rank 16 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE +jean-zay-iam41:276751:276865 [5] NCCL INFO comm 0x14c788002fb0 rank 21 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE +jean-zay-iam41:276752:276872 [6] NCCL INFO comm 0x14e538002fb0 rank 22 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE +jean-zay-iam40:289968:290086 [1] NCCL INFO Connected all trees +jean-zay-iam40:289968:290086 [1] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289968:290086 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289969:290087 [2] NCCL INFO Connected all trees +jean-zay-iam40:289969:290087 [2] NCCL INFO threadThresholds 8/8/64 | 256/8/64 | 8/8/512 +jean-zay-iam40:289969:290087 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +jean-zay-iam40:289969:290087 [2] NCCL INFO comm 0x154f98002fb0 rank 10 nranks 32 cudaDev 2 busId 48000 - Init COMPLETE +jean-zay-iam40:289971:290093 [4] NCCL INFO comm 0x1529e8002fb0 rank 12 nranks 32 cudaDev 4 busId 88000 - Init COMPLETE +jean-zay-iam40:289970:290090 [3] NCCL INFO comm 0x14ee38002fb0 rank 11 nranks 32 cudaDev 3 busId 4c000 - Init COMPLETE +jean-zay-iam40:289973:290089 [6] NCCL INFO comm 0x145bb0002fb0 rank 14 nranks 32 cudaDev 6 busId c8000 - Init COMPLETE +jean-zay-iam40:289972:290088 [5] NCCL INFO comm 0x14d508002fb0 rank 13 nranks 32 cudaDev 5 busId 8b000 - Init COMPLETE +jean-zay-iam40:289968:290086 [1] NCCL INFO comm 0x14d558002fb0 rank 9 nranks 32 cudaDev 1 busId b000 - Init COMPLETE +jean-zay-iam40:289974:290092 [7] NCCL INFO comm 0x1494b8002fb0 rank 15 nranks 32 cudaDev 7 busId cb000 - Init COMPLETE +jean-zay-iam40:289967:290091 [0] NCCL INFO comm 0x14aa40002fb0 rank 8 nranks 32 cudaDev 0 busId 7000 - Init COMPLETE +ignore me 17 +6: + duration: 3.9563 sec + algo throughput: 16176643777.3540 bps, 16.1766 Gbps + busbw: 15.6711 Gbps +ignore me 17 +7: + duration: 4.1011 sec + algo throughput: 15605538666.8284 bps, 15.6055 Gbps + busbw: 15.1179 Gbps 
+ignore me 17 +5: + duration: 4.0281 sec + algo throughput: 15888388696.7879 bps, 15.8884 Gbps + busbw: 15.3919 Gbps +ignore me 17 +ignore me 17 +27: + duration: 4.1446 sec + algo throughput: 15441789907.3424 bps, 15.4418 Gbps + busbw: 14.9592 Gbps +4: + duration: 4.1584 sec + algo throughput: 15390377253.3963 bps, 15.3904 Gbps + busbw: 14.9094 Gbps +ignore me 17 +ignore me 17 +28: + duration: 4.0857 sec +ignore me 17 + algo throughput: 15664581341.3504 bps, 15.6646 Gbps + busbw: 15.1751 Gbps +26: + duration: 4.1296 sec + algo throughput: 15497834133.7166 bps, 15.4978 Gbps + busbw: 15.0135 Gbps +3: + duration: 4.1508 sec + algo throughput: 15418582053.9969 bps, 15.4186 Gbps + busbw: 14.9368 Gbps +ignore me 17 +ignore me 17 +8: + duration: 4.2224 sec + algo throughput: 15157302718.4214 bps, 15.1573 Gbps + busbw: 14.6836 Gbps +ignore me 17 +29: + duration: 4.0621 sec + algo throughput: 15755272218.1164 bps, 15.7553 Gbps + busbw: 15.2629 Gbps +25: + duration: 4.1516 sec + algo throughput: 15415828590.9963 bps, 15.4158 Gbps + busbw: 14.9341 Gbps +ignore me 17 +ignore me 17 +9: + duration: 4.0906 sec + algo throughput: 15645779547.2488 bps, 15.6458 Gbps + busbw: 15.1568 Gbps +ignore me 17 +ignore me 17 +ignore me 17 +ignore me 17 +23: + duration: 4.1569 sec +30: + duration: 4.0722 sec + algo throughput: 15716173146.2812 bps, 15.7162 Gbps +1: + duration: 4.0663 sec + algo throughput: 15396140153.8145 bps, 15.3961 Gbps + busbw: 14.9150 Gbps + algo throughput: 15739134214.8659 bps, 15.7391 Gbps + busbw: 15.2473 Gbps + busbw: 15.2250 Gbps +22: + duration: 4.0428 sec + algo throughput: 15830448441.2183 bps, 15.8304 Gbps + busbw: 15.3357 Gbps +ignore me 17 +2: + duration: 4.1513 sec + algo throughput: 15416737873.4375 bps, 15.4167 Gbps + busbw: 14.9350 Gbps +ignore me 17 +ignore me 17 +10: + duration: 4.1135 sec +24: + duration: 4.0613 sec + algo throughput: 15758479220.2859 bps, 15.7585 Gbps + busbw: 15.2660 Gbps + algo throughput: 15558588332.8945 bps, 15.5586 Gbps + busbw: 
15.0724 Gbps +ignore me 17 +31: + duration: 4.1502 sec + algo throughput: 15420839540.9777 bps, 15.4208 Gbps + busbw: 14.9389 Gbps +21: + duration: 4.1419 sec + algo throughput: 15451690470.9343 bps, 15.4517 Gbps + busbw: 14.9688 Gbps +ignore me 17 +ignore me 17 +ignore me 17 +11: + duration: 4.0492 sec + algo throughput: 15805693708.4176 bps, 15.8057 Gbps +20: + duration: 4.0993 sec + algo throughput: 15612440511.8644 bps, 15.6124 Gbps + busbw: 15.1246 Gbps +0: + duration: 4.0120 sec + algo throughput: 15952303597.3018 bps, 15.9523 Gbps + busbw: 15.3118 Gbps + busbw: 15.4538 Gbps +ignore me 17 +ignore me 17 +12: + duration: 4.1850 sec + algo throughput: 15292749814.3865 bps, 15.2927 Gbps + busbw: 14.8149 Gbps +19: + duration: 4.0412 sec + algo throughput: 15836843924.5534 bps, 15.8368 Gbps + busbw: 15.3419 Gbps +ignore me 17 +13: + duration: 4.0840 sec + algo throughput: 15670769926.9476 bps, 15.6708 Gbps + busbw: 15.1811 Gbps +ignore me 17 +18: + duration: 4.1647 sec + algo throughput: 15367278261.5983 bps, 15.3673 Gbps + busbw: 14.8871 Gbps +ignore me 17 +14: + duration: 4.0438 sec + algo throughput: 15826582974.8276 bps, 15.8266 Gbps + busbw: 15.3320 Gbps +ignore me 17 +ignore me 17 +17: + duration: 4.1553 sec + algo throughput: 15401946302.4121 bps, 15.4019 Gbps +15: + duration: 4.1608 sec + algo throughput: 15381558817.4705 bps, 15.3816 Gbps + busbw: 14.9206 Gbps + busbw: 14.9009 Gbps +ignore me 17 +16: + duration: 4.0474 sec + algo throughput: 15812815660.2083 bps, 15.8128 Gbps + busbw: 15.3187 Gbps +ignore me 555 +23: + duration: 1.5186 sec + algo throughput: 42143980222.5332 bps, 42.1440 Gbps +ignore me 555 + busbw: 40.8270 Gbps +9: + duration: 1.5187 sec + algo throughput: 42140589448.6002 bps, 42.1406 Gbps + busbw: 40.8237 Gbps +ignore me 555 +22: + duration: 1.5187 sec + algo throughput: 42140378571.5530 bps, 42.1404 Gbps +ignore me 555 + busbw: 40.8235 Gbps +24: + duration: 1.5187 sec + algo throughput: 42142240285.3888 bps, 42.1422 Gbps + busbw: 
40.8253 Gbps +ignore me 555 +7: + duration: 1.5199 sec + algo throughput: 42108029847.7049 bps, 42.1080 Gbps + busbw: 40.7922 Gbps +ignore me 555 +ignore me 555 +10: + duration: 1.5188 sec + algo throughput: 42138916267.0821 bps, 42.1389 Gbps + busbw: 40.8221 Gbps +8: + duration: 1.5192 sec + algo throughput: 42126338602.2545 bps, 42.1263 Gbps + busbw: 40.8099 Gbps +ignore me 555 +ignore me 555 +ignore me 555 +21: + duration: 1.5188 sec + algo throughput: 42139898494.4063 bps, 42.1399 Gbps + busbw: 40.8230 Gbps +25: + duration: 1.5192 sec + algo throughput: 42127092502.8457 bps, 42.1271 Gbps + busbw: 40.8106 Gbps +6: + duration: 1.5202 sec + algo throughput: 42099423136.7009 bps, 42.0994 Gbps + busbw: 40.7838 Gbps +ignore me 555 +11: + duration: 1.5187 sec + algo throughput: 42141289163.4721 bps, 42.1413 Gbps +ignore me 555 + busbw: 40.8244 Gbps +20: + duration: 1.5188 sec + algo throughput: 42139687792.2383 bps, 42.1397 Gbps + busbw: 40.8228 Gbps +ignore me 555 +26: + duration: 1.5197 sec + algo throughput: 42113294024.4995 bps, 42.1133 Gbps + busbw: 40.7973 Gbps +ignore me 555 +ignore me 555 +5: + duration: 1.5202 sec + algo throughput: 42100022978.8723 bps, 42.1000 Gbps + busbw: 40.7844 Gbps +12: + duration: 1.5187 sec + algo throughput: 42141483180.7297 bps, 42.1415 Gbps + busbw: 40.8246 Gbps +ignore me 555 +19: + duration: 1.5188 sec + algo throughput: 42139070669.3367 bps, 42.1391 Gbps + busbw: 40.8222 Gbps +ignore me 555 +ignore me 555 +13: + duration: 1.5187 sec + algo throughput: 42140413754.7281 bps, 42.1404 Gbps +27: + duration: 1.5202 sec + algo throughput: 42099139976.4359 bps, 42.0991 Gbps + busbw: 40.7835 Gbps + busbw: 40.8235 Gbps +ignore me 555 +4: + duration: 1.5203 sec + algo throughput: 42097969076.0652 bps, 42.0980 Gbps + busbw: 40.7824 Gbps +ignore me 555 +18: + duration: 1.5187 sec + algo throughput: 42141134996.9228 bps, 42.1411 Gbps + busbw: 40.8242 Gbps +ignore me 555 +28: + duration: 1.5203 sec + algo throughput: 42097422955.6261 bps, 
42.0974 Gbps +ignore me 555 + busbw: 40.7819 Gbps +ignore me 555 +14: + duration: 1.5188 sec + algo throughput: 42139893361.7641 bps, 42.1399 Gbps + busbw: 40.8230 Gbps +3: + duration: 1.5203 sec + algo throughput: 42097598433.0412 bps, 42.0976 Gbps + busbw: 40.7820 Gbps +ignore me 555 +17: + duration: 1.5188 sec + algo throughput: 42139267495.6574 bps, 42.1393 Gbps + busbw: 40.8224 Gbps +ignore me 555 +ignore me 555 +29: + duration: 1.5203 sec + algo throughput: 42096144082.6273 bps, 42.0961 Gbps +ignore me 555 + busbw: 40.7806 Gbps +15: + duration: 1.5188 sec + algo throughput: 42137175969.6847 bps, 42.1372 Gbps +ignore me 555 + busbw: 40.8204 Gbps +16: + duration: 1.5186 sec + algo throughput: 42144770940.2506 bps, 42.1448 Gbps + busbw: 40.8277 Gbps +2: + duration: 1.5201 sec + algo throughput: 42101391688.1200 bps, 42.1014 Gbps + busbw: 40.7857 Gbps +ignore me 555 +ignore me 555 +30: + duration: 1.5203 sec + algo throughput: 42096228974.3786 bps, 42.0962 Gbps + busbw: 40.7807 Gbps +1: + duration: 1.5204 sec + algo throughput: 42095494315.5608 bps, 42.0955 Gbps + busbw: 40.7800 Gbps +ignore me 555 +31: + duration: 1.5203 sec + algo throughput: 42096577970.2344 bps, 42.0966 Gbps + busbw: 40.7811 Gbps +ignore me 555 +0: + duration: 1.5203 sec + algo throughput: 42097401467.1174 bps, 42.0974 Gbps + busbw: 40.7819 Gbps +ignore me 17760 +19: + duration: 1.5271 sec + algo throughput: 41910600634.9022 bps, 41.9106 Gbps + busbw: 40.6009 Gbps +ignore me 17760 +ignore me 17760 +18: + duration: 1.5270 sec + algo throughput: 41911582289.7142 bps, 41.9116 Gbps + busbw: 40.6018 Gbps +20: + duration: 1.5276 sec + algo throughput: 41894987422.3905 bps, 41.8950 Gbps + busbw: 40.5858 Gbps +ignore me 17760 +ignore me 17760 +17: + duration: 1.5270 sec + algo throughput: 41913406576.8859 bps, 41.9134 Gbps + busbw: 40.6036 Gbps +ignore me 17760 +21: + duration: 1.5280 sec + algo throughput: 41885069299.4918 bps, 41.8851 Gbps + busbw: 40.5762 Gbps +ignore me 17760 +14: + duration: 
1.5272 sec + algo throughput: 41907314947.6113 bps, 41.9073 Gbps + busbw: 40.5977 Gbps +15: + duration: 1.5270 sec + algo throughput: 41913242272.3447 bps, 41.9132 Gbps + busbw: 40.6035 Gbps +ignore me 17760 +ignore me 17760 +13: + duration: 1.5277 sec + algo throughput: 41893273876.8880 bps, 41.8933 Gbps + busbw: 40.5841 Gbps +ignore me 17760 +16: + duration: 1.5271 sec + algo throughput: 41909230280.3461 bps, 41.9092 Gbps + busbw: 40.5996 Gbps +22: + duration: 1.5286 sec + algo throughput: 41869319488.2197 bps, 41.8693 Gbps + busbw: 40.5609 Gbps +ignore me 17760 +ignore me 17760 +23: + duration: 1.5289 sec + algo throughput: 41861290350.4216 bps, 41.8613 Gbps +12: + duration: 1.5281 sec + algo throughput: 41882850453.1701 bps, 41.8829 Gbps + busbw: 40.5740 Gbps + busbw: 40.5531 Gbps +ignore me 17760 +11: + duration: 1.5286 sec + algo throughput: 41868966830.1641 bps, 41.8690 Gbps + busbw: 40.5606 Gbps +ignore me 17760 +ignore me 17760 +24: + duration: 1.5291 sec + algo throughput: 41854797523.2289 bps, 41.8548 Gbps +10: + duration: 1.5290 sec + algo throughput: 41858049187.4726 bps, 41.8580 Gbps + busbw: 40.5468 Gbps + busbw: 40.5500 Gbps +ignore me 17760 +25: + duration: 1.5291 sec + algo throughput: 41855697296.6685 bps, 41.8557 Gbps + busbw: 40.5477 Gbps +ignore me 17760 +ignore me 17760 +ignore me 17760 +9: + duration: 1.5296 sec + algo throughput: 41841767653.6339 bps, 41.8418 Gbps + busbw: 40.5342 Gbps +6: + duration: 1.5292 sec + algo throughput: 41851931325.8954 bps, 41.8519 Gbps + busbw: 40.5441 Gbps +7: + duration: 1.5294 sec + algo throughput: 41846364025.0241 bps, 41.8464 Gbps + busbw: 40.5387 Gbps +ignore me 17760 +26: + duration: 1.5290 sec + algo throughput: 41856070811.5191 bps, 41.8561 Gbps + busbw: 40.5481 Gbps +ignore me 17760 +ignore me 17760 +5: + duration: 1.5291 sec + algo throughput: 41855875143.2076 bps, 41.8559 Gbps + busbw: 40.5479 Gbps +8: + duration: 1.5295 sec + algo throughput: 41843741534.2125 bps, 41.8437 Gbps + busbw: 40.5361 
Gbps +ignore me 17760 +27: + duration: 1.5290 sec + algo throughput: 41856588048.6577 bps, 41.8566 Gbps + busbw: 40.5486 Gbps +ignore me 17760 +4: + duration: 1.5290 sec + algo throughput: 41856245346.9914 bps, 41.8562 Gbps + busbw: 40.5482 Gbps +ignore me 17760 +28: + duration: 1.5290 sec + algo throughput: 41858071525.4799 bps, 41.8581 Gbps + busbw: 40.5500 Gbps +ignore me 17760 +3: + duration: 1.5290 sec + algo throughput: 41857294677.8322 bps, 41.8573 Gbps + busbw: 40.5493 Gbps +ignore me 17760 +29: + duration: 1.5289 sec + algo throughput: 41859219678.2562 bps, 41.8592 Gbps + busbw: 40.5511 Gbps +ignore me 17760 +2: + duration: 1.5289 sec + algo throughput: 41859941759.2278 bps, 41.8599 Gbps + busbw: 40.5518 Gbps +ignore me 17760 +30: + duration: 1.5289 sec + algo throughput: 41858890268.6218 bps, 41.8589 Gbps + busbw: 40.5508 Gbps +ignore me 17760 +1: + duration: 1.5290 sec + algo throughput: 41856634528.5093 bps, 41.8566 Gbps + busbw: 40.5486 Gbps +ignore me 17760 +31: + duration: 1.5290 sec + algo throughput: 41858450586.8372 bps, 41.8585 Gbps + busbw: 40.5504 Gbps +ignore me 17760 +0: + duration: 1.5289 sec + algo throughput: 41860374323.0033 bps, 41.8604 Gbps + busbw: 40.5522 Gbps +ignore me 568326 +18: + duration: 1.5292 sec + algo throughput: 41851192689.6061 bps, 41.8512 Gbps + busbw: 40.5433 Gbps +ignore me 568326 +19: + duration: 1.5296 sec + algo throughput: 41840982602.8527 bps, 41.8410 Gbps + busbw: 40.5335 Gbps +ignore me 568326 +17: + duration: 1.5292 sec + algo throughput: 41851389273.1359 bps, 41.8514 Gbps + busbw: 40.5435 Gbps +ignore me 568326 +ignore me 568326 +ignore me 568326 +14: + duration: 1.5293 sec + algo throughput: 41850546358.8408 bps, 41.8505 Gbps + busbw: 40.5427 Gbps +20: + duration: 1.5296 sec + algo throughput: 41841711605.3523 bps, 41.8417 Gbps +15: + duration: 1.5292 sec + algo throughput: 41850900844.4322 bps, 41.8509 Gbps + busbw: 40.5342 Gbps + busbw: 40.5431 Gbps +ignore me 568326 +ignore me 568326 +13: + duration: 
1.5293 sec +16: + duration: 1.5292 sec + algo throughput: 41851732548.4344 bps, 41.8517 Gbps + busbw: 40.5439 Gbps + algo throughput: 41849491619.2404 bps, 41.8495 Gbps + busbw: 40.5417 Gbps +ignore me 568326 +21: + duration: 1.5296 sec + algo throughput: 41841051125.8787 bps, 41.8411 Gbps + busbw: 40.5335 Gbps +ignore me 568326 +12: + duration: 1.5293 sec + algo throughput: 41848837733.7002 bps, 41.8488 Gbps + busbw: 40.5411 Gbps +ignore me 568326 +ignore me 568326 +22: + duration: 1.5295 sec + algo throughput: 41842526390.1754 bps, 41.8425 Gbps +11: + duration: 1.5292 sec + algo throughput: 41851402077.7964 bps, 41.8514 Gbps + busbw: 40.5349 Gbps + busbw: 40.5435 Gbps +ignore me 568326 +ignore me 568326 +25: + duration: 1.5289 sec + algo throughput: 41860057899.5817 bps, 41.8601 Gbps + busbw: 40.5519 Gbps +23: + duration: 1.5296 sec + algo throughput: 41841328471.6004 bps, 41.8413 Gbps + busbw: 40.5338 Gbps +ignore me 568326 +ignore me 568326 +10: + duration: 1.5293 sec + algo throughput: 41850492064.7668 bps, 41.8505 Gbps +ignore me 568326 + busbw: 40.5427 Gbps +26: + duration: 1.5289 sec + algo throughput: 41861009756.5066 bps, 41.8610 Gbps + busbw: 40.5529 Gbps +24: + duration: 1.5293 sec + algo throughput: 41848595317.3039 bps, 41.8486 Gbps + busbw: 40.5408 Gbps +ignore me 568326 +5: + duration: 1.5289 sec + algo throughput: 41860676073.0211 bps, 41.8607 Gbps +ignore me 568326 + busbw: 40.5525 Gbps +ignore me 568326 +27: + duration: 1.5288 sec + algo throughput: 41861710376.5379 bps, 41.8617 Gbps + busbw: 40.5535 Gbps +ignore me 568326 +6: + duration: 1.5292 sec + algo throughput: 41852910485.9393 bps, 41.8529 Gbps + busbw: 40.5450 Gbps +ignore me 568326 +9: + duration: 1.5292 sec + algo throughput: 41850873996.3972 bps, 41.8509 Gbps +ignore me 568326 + busbw: 40.5430 Gbps +4: + duration: 1.5288 sec + algo throughput: 41861534698.9598 bps, 41.8615 Gbps + busbw: 40.5534 Gbps +7: + duration: 1.5293 sec + algo throughput: 41849369678.9657 bps, 41.8494 Gbps + 
busbw: 40.5416 Gbps +ignore me 568326 +28: + duration: 1.5289 sec + algo throughput: 41861383911.2504 bps, 41.8614 Gbps + busbw: 40.5532 Gbps +ignore me 568326 +ignore me 568326 +8: + duration: 1.5293 sec + algo throughput: 41848441035.8316 bps, 41.8484 Gbps +3: + duration: 1.5289 sec + algo throughput: 41861481198.7633 bps, 41.8615 Gbps + busbw: 40.5533 Gbps + busbw: 40.5407 Gbps +ignore me 568326 +29: + duration: 1.5289 sec + algo throughput: 41861138665.5933 bps, 41.8611 Gbps + busbw: 40.5530 Gbps +ignore me 568326 +2: + duration: 1.5289 sec + algo throughput: 41861040340.5475 bps, 41.8610 Gbps + busbw: 40.5529 Gbps +ignore me 568326 +30: + duration: 1.5289 sec + algo throughput: 41861393521.7231 bps, 41.8614 Gbps +ignore me 568326 + busbw: 40.5532 Gbps +1: + duration: 1.5288 sec + algo throughput: 41863250360.5825 bps, 41.8633 Gbps + busbw: 40.5550 Gbps +ignore me 568326 +31: + duration: 1.5289 sec + algo throughput: 41860930490.0206 bps, 41.8609 Gbps + busbw: 40.5528 Gbps +ignore me 568326 +0: + duration: 1.5289 sec + algo throughput: 41861381313.3954 bps, 41.8614 Gbps + busbw: 40.5532 Gbps +ignore me 18186434 +18: + duration: 1.5304 sec + algo throughput: 41819308451.5824 bps, 41.8193 Gbps + busbw: 40.5125 Gbps +ignore me 18186434 +19: + duration: 1.5304 sec + algo throughput: 41819374415.9696 bps, 41.8194 Gbps + busbw: 40.5125 Gbps +ignore me 18186434 +17: + duration: 1.5304 sec + algo throughput: 41819400154.7344 bps, 41.8194 Gbps + busbw: 40.5125 Gbps +ignore me 18186434 +ignore me 18186434 +15: + duration: 1.5303 sec + algo throughput: 41821175681.0869 bps, 41.8212 Gbps +20: + duration: 1.5304 sec + algo throughput: 41820265560.0101 bps, 41.8203 Gbps + busbw: 40.5134 Gbps + busbw: 40.5143 Gbps +ignore me 18186434 +14: + duration: 1.5305 sec + algo throughput: 41817412474.7738 bps, 41.8174 Gbps + busbw: 40.5106 Gbps +ignore me 18186434 +16: + duration: 1.5304 sec + algo throughput: 41820405171.5425 bps, 41.8204 Gbps + busbw: 40.5135 Gbps +ignore me 
18186434 +ignore me 18186434 +21: + duration: 1.5304 sec + algo throughput: 41820211341.2948 bps, 41.8202 Gbps + busbw: 40.5133 Gbps +13: + duration: 1.5305 sec + algo throughput: 41815893542.3173 bps, 41.8159 Gbps + busbw: 40.5091 Gbps +ignore me 18186434 +ignore me 18186434 +22: + duration: 1.5304 sec + algo throughput: 41819993958.8392 bps, 41.8200 Gbps + busbw: 40.5131 Gbps +12: + duration: 1.5305 sec + algo throughput: 41816988451.4211 bps, 41.8170 Gbps + busbw: 40.5102 Gbps +ignore me 18186434 +23: + duration: 1.5304 sec + algo throughput: 41820013685.7934 bps, 41.8200 Gbps + busbw: 40.5131 Gbps +ignore me 18186434 +11: + duration: 1.5306 sec + algo throughput: 41813631070.6557 bps, 41.8136 Gbps + busbw: 40.5070 Gbps +ignore me 18186434 +10: + duration: 1.5306 sec + algo throughput: 41813136230.6469 bps, 41.8131 Gbps + busbw: 40.5065 Gbps +ignore me 18186434 +24: + duration: 1.5306 sec + algo throughput: 41813362805.8615 bps, 41.8134 Gbps + busbw: 40.5067 Gbps +ignore me 18186434 +ignore me 18186434 +9: + duration: 1.5306 sec + algo throughput: 41814612837.9065 bps, 41.8146 Gbps +25: + duration: 1.5311 sec + algo throughput: 41801050732.9013 bps, 41.8011 Gbps + busbw: 40.4948 Gbps + busbw: 40.5079 Gbps +ignore me 18186434 +ignore me 18186434 +6: + duration: 1.5307 sec + algo throughput: 41811611108.9466 bps, 41.8116 Gbps + busbw: 40.5050 Gbps +7: + duration: 1.5305 sec + algo throughput: 41815091867.5771 bps, 41.8151 Gbps + busbw: 40.5084 Gbps +ignore me 18186434 +8: + duration: 1.5304 sec + algo throughput: 41818224707.1108 bps, 41.8182 Gbps + busbw: 40.5114 Gbps +ignore me 18186434 +26: + duration: 1.5311 sec + algo throughput: 41799543931.1436 bps, 41.7995 Gbps + busbw: 40.4933 Gbps +ignore me 18186434 +5: + duration: 1.5311 sec + algo throughput: 41800540982.4688 bps, 41.8005 Gbps + busbw: 40.4943 Gbps +ignore me 18186434 +27: + duration: 1.5311 sec + algo throughput: 41798734639.3871 bps, 41.7987 Gbps + busbw: 40.4925 Gbps +ignore me 18186434 +4: + 
duration: 1.5311 sec + algo throughput: 41799893567.7921 bps, 41.7999 Gbps + busbw: 40.4936 Gbps +ignore me 18186434 +28: + duration: 1.5312 sec + algo throughput: 41798021113.2911 bps, 41.7980 Gbps + busbw: 40.4918 Gbps +ignore me 18186434 +3: + duration: 1.5311 sec + algo throughput: 41799656984.3057 bps, 41.7997 Gbps + busbw: 40.4934 Gbps +ignore me 18186434 +29: + duration: 1.5312 sec +ignore me 18186434 + algo throughput: 41797483455.9485 bps, 41.7975 Gbps + busbw: 40.4913 Gbps +2: + duration: 1.5312 sec + algo throughput: 41797889916.8612 bps, 41.7979 Gbps + busbw: 40.4917 Gbps +ignore me 18186434 +30: + duration: 1.5312 sec + algo throughput: 41797399459.7577 bps, 41.7974 Gbps + busbw: 40.4912 Gbps +ignore me 18186434 +1: + duration: 1.5312 sec + algo throughput: 41796838922.8479 bps, 41.7968 Gbps + busbw: 40.4907 Gbps +ignore me 18186434 +31: + duration: 1.5312 sec + algo throughput: 41798535248.2715 bps, 41.7985 Gbps + busbw: 40.4923 Gbps +ignore me 18186434 +0: + duration: 1.5312 sec + algo throughput: 41797155891.1448 bps, 41.7972 Gbps + busbw: 40.4910 Gbps diff --git a/experiments/bandwidth/all_reduce_bench.py b/experiments/bandwidth/all_reduce_bench.py new file mode 100644 index 0000000000000000000000000000000000000000..7042f8b6bdaa1887c85ae571290a1e7a8b6a048c --- /dev/null +++ b/experiments/bandwidth/all_reduce_bench.py @@ -0,0 +1,66 @@ +# python -m torch.distributed.run --nproc_per_node=2 all_reduce_bench.py + +import argparse +import fcntl +import os +import socket +import time +import torch +import torch.distributed as dist + +# note: this benchmark doesn't care how many gpus per node one has + +TRIALS = 5 + +N = 500000 +M = 2000 + +def printflock(*msgs): + """ print """ + with open(__file__, "r") as fh: + fcntl.flock(fh, fcntl.LOCK_EX) + try: + print(*msgs) + finally: + fcntl.flock(fh, fcntl.LOCK_UN) + +def timed_allreduce(mat, id): + pre = time.perf_counter() + dist.all_reduce(mat) + printflock(f"ignore me {int(mat[0][0])}") # required due to 
lazy evaluation + duration = time.perf_counter() - pre + tput = ((M*N*4*2)/duration)*8 # *2 is for send + receive, *8 for gigabits/second + size = M * N * 4 # 4 is fp32 + n = dist.get_world_size() + busbw = (size / duration) * (2 * (n - 1) / n) * 8 + printflock(f"{id}:\n", + f"duration: {duration:.4f} sec\n", + f"algo throughput: {tput:.4f} bps, {tput/1e9:.4f} Gbps\n", + f"busbw: {busbw / 1e9:.4f} Gbps" + ) + +def run(local_rank): + hostname = socket.gethostname() + id = f"{hostname}:{local_rank}" + global_rank = dist.get_rank() + + printflock(f"{id} data size: {M*N*4/1e9} GB") + mat = torch.rand(N, M, dtype=torch.float32).cuda(local_rank) + + for i in range(TRIALS): + dist.barrier() + if global_rank == 0: + print(f"\n\n\n-----------trial-{i}----------------") + timed_allreduce(mat, id) + +def init_processes(local_rank, fn, backend='nccl'): + torch.cuda.set_device(local_rank) + dist.init_process_group(backend) + fn(local_rank) + + +if __name__ == "__main__": + rank = int(os.environ["LOCAL_RANK"]) + printflock("local_rank: %d" % rank) + init_processes(local_rank=rank, fn=run) + diff --git a/experiments/bandwidth/n16_32gb_all_reduce_bench.txt b/experiments/bandwidth/n16_32gb_all_reduce_bench.txt new file mode 100644 index 0000000000000000000000000000000000000000..3fbe1d69f2501c0f6a52b1243639cfbf8f15c044 --- /dev/null +++ b/experiments/bandwidth/n16_32gb_all_reduce_bench.txt @@ -0,0 +1,3145 @@ +export NCCL_DEBUG=info +srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.launch --nnodes 16 --nproc_per_node=4 --node_rank $SLURM_PROCID --master_addr r7i4n1 --master_port 12345 all_reduce_bench.py' > 16_node_32gb_all_reduce_bench.txt + +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +local_rank: 1 +local_rank: 3 +local_rank: 2 +local_rank: 1 +local_rank: 2 +local_rank: 2 +local_rank: 2 +local_rank: 3 +local_rank: 1 +local_rank: 2 +local_rank: 0 +local_rank: 3 +local_rank: 1 +local_rank: 2 +local_rank: 3 +local_rank: 0 +local_rank: 3 +local_rank: 3 +local_rank: 1 +local_rank: 0 +local_rank: 0 +local_rank: 2 +local_rank: 0 +local_rank: 3 +local_rank: 2 +local_rank: 3 +local_rank: 0 +local_rank: 1 +local_rank: 2 +local_rank: 1 +local_rank: 3 +local_rank: 3 +local_rank: 3 +local_rank: 1 +local_rank: 0 +local_rank: 1 +local_rank: 2 +local_rank: 1 +local_rank: 3 +local_rank: 2 +local_rank: 2 +local_rank: 1 +local_rank: 3 +local_rank: 1 +local_rank: 2 +local_rank: 3 +local_rank: 0 +local_rank: 1 +local_rank: 2 +local_rank: 1 +local_rank: 3 +local_rank: 0 +local_rank: 2 +local_rank: 0 +local_rank: 0 +local_rank: 0 +local_rank: 0 +local_rank: 2 +local_rank: 3 +local_rank: 1 +local_rank: 0 +local_rank: 0 +local_rank: 0 +local_rank: 1 +9 data size: 4.0 GB +0 data size: 4.0 GB +33 data size: 4.0 GB +39 data size: 4.0 GB +60 data size: 4.0 GB 
+10 data size: 4.0 GB +37 data size: 4.0 GB +63 data size: 4.0 GB +62 data size: 4.0 GB +25 data size: 4.0 GB +41 data size: 4.0 GB +38 data size: 4.0 GB +43 data size: 4.0 GB +42 data size: 4.0 GB +8 data size: 4.0 GB +21 data size: 4.0 GB +27 data size: 4.0 GB +36 data size: 4.0 GB +26 data size: 4.0 GB +24 data size: 4.0 GB +23 data size: 4.0 GB +22 data size: 4.0 GB +20 data size: 4.0 GB +3 data size: 4.0 GB +1 data size: 4.0 GB +14 data size: 4.0 GB +12 data size: 4.0 GB +2 data size: 4.0 GB +15 data size: 4.0 GB +29 data size: 4.0 GB +13 data size: 4.0 GB +51 data size: 4.0 GB +54 data size: 4.0 GB +30 data size: 4.0 GB +49 data size: 4.0 GB +28 data size: 4.0 GB +59 data size: 4.0 GB +31 data size: 4.0 GB +55 data size: 4.0 GB +48 data size: 4.0 GB +50 data size: 4.0 GB +17 data size: 4.0 GB +6 data size: 4.0 GB +58 data size: 4.0 GB +53 data size: 4.0 GB +57 data size: 4.0 GB +52 data size: 4.0 GB +35 data size: 4.0 GB +11 data size: 4.0 GB +16 data size: 4.0 GB +56 data size: 4.0 GB +4 data size: 4.0 GB +61 data size: 4.0 GB +40 data size: 4.0 GB +46 data size: 4.0 GB +19 data size: 4.0 GB +32 data size: 4.0 GB +18 data size: 4.0 GB +34 data size: 4.0 GB +7 data size: 4.0 GB +5 data size: 4.0 GB +45 data size: 4.0 GB +47 data size: 4.0 GB +44 data size: 4.0 GB +r7i4n1:77231:77231 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:77231:77231 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:77231:77231 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:77231:77231 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i4n1:77234:77234 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r8i5n2:15801:15801 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.4.37<0> [1]ib1:10.149.4.37<0> +r7i4n1:77234:77234 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal 
implementation +r8i5n2:15801:15801 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n7:81279:81279 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.145<0> [1]ib1:10.149.7.145<0> +r14i7n7:81279:81279 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:77234:77234 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:77234:77234 [3] NCCL INFO Using network IB +r8i5n2:15801:15801 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.4.37<0> +r8i5n2:15801:15801 [3] NCCL INFO Using network IB +r8i5n2:15800:15800 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.4.37<0> [1]ib1:10.149.4.37<0> +r14i7n7:81279:81279 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.145<0> +r14i7n7:81279:81279 [0] NCCL INFO Using network IB +r8i5n2:15800:15800 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n3:66129:66129 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.154<0> [1]ib1:10.149.7.154<0> +r14i7n3:66129:66129 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n0:25487:25487 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.151<0> [1]ib1:10.149.7.151<0> +r8i5n2:15800:15800 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.4.37<0> +r8i5n2:15800:15800 [2] NCCL INFO Using network IB +r14i7n0:25487:25487 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n7:81280:81280 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.145<0> [1]ib1:10.149.7.145<0> +r14i7n3:66129:66129 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.154<0> +r14i7n3:66129:66129 [3] NCCL INFO Using network IB 
+r8i1n1:21966:21966 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.205<0> [1]ib1:10.149.0.205<0> +r14i7n7:81280:81280 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n1:21966:21966 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n0:25487:25487 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.151<0> +r14i7n0:25487:25487 [0] NCCL INFO Using network IB +r14i7n7:81280:81280 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.145<0> +r14i7n7:81280:81280 [1] NCCL INFO Using network IB +r8i1n1:21966:21966 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.205<0> +r8i1n1:21966:21966 [3] NCCL INFO Using network IB +r14i7n2:39446:39446 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.153<0> [1]ib1:10.149.7.153<0> +r14i7n0:25490:25490 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.151<0> [1]ib1:10.149.7.151<0> +r14i7n2:39446:39446 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n0:25490:25490 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:60564:60564 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.95<0> [1]ib1:10.149.0.95<0> +r14i7n2:39446:39446 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.153<0> +r14i7n2:39446:39446 [1] NCCL INFO Using network IB +r14i7n0:25490:25490 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.151<0> +r14i7n0:25490:25490 [3] NCCL INFO Using network IB +r7i6n2:60564:60564 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n6:76352:76352 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.228<0> [1]ib1:10.149.0.228<0> 
+r8i3n1:64579:64579 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.223<0> [1]ib1:10.149.0.223<0> +r14i7n3:66127:66127 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.154<0> [1]ib1:10.149.7.154<0> +r8i3n6:76352:76352 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n1:64579:64579 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n3:66127:66127 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:60564:60564 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:60564:60564 [0] NCCL INFO Using network IB +r8i2n1:41757:41757 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.214<0> [1]ib1:10.149.0.214<0> +r8i1n1:21963:21963 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.205<0> [1]ib1:10.149.0.205<0> +r14i7n3:66126:66126 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.154<0> [1]ib1:10.149.7.154<0> +r14i7n6:27449:27449 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.144<0> [1]ib1:10.149.7.144<0> +r8i1n1:21965:21965 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.205<0> [1]ib1:10.149.0.205<0> +r8i2n1:41757:41757 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n1:21963:21963 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n3:66127:66127 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.154<0> +r14i7n3:66127:66127 [1] NCCL INFO Using network IB +r8i3n6:76352:76352 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.228<0> +r8i3n6:76352:76352 [0] NCCL INFO Using network IB +r8i3n1:64581:64581 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.223<0> [1]ib1:10.149.0.223<0> +r14i7n3:66126:66126 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal 
implementation +r14i7n6:27449:27449 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n4:5389:5389 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.142<0> [1]ib1:10.149.7.142<0> +r8i3n1:64579:64579 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.223<0> +r8i3n1:64579:64579 [1] NCCL INFO Using network IB +r8i1n1:21965:21965 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n1:64581:64581 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n4:5389:5389 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n6:27448:27448 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.144<0> [1]ib1:10.149.7.144<0> +r8i1n1:21963:21963 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.205<0> +r8i1n1:21963:21963 [0] NCCL INFO Using network IB +r8i2n1:41757:41757 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.214<0> +r8i2n1:41757:41757 [0] NCCL INFO Using network IB +r8i1n1:21965:21965 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.205<0> +r8i1n1:21965:21965 [2] NCCL INFO Using network IB +r14i7n3:66126:66126 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.154<0> +r14i7n3:66126:66126 [0] NCCL INFO Using network IB +r14i7n6:27448:27448 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n6:27449:27449 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.144<0> +r14i7n6:27449:27449 [1] NCCL INFO Using network IB +r8i3n1:64581:64581 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; 
OOB ib0:10.148.0.223<0> +r8i3n1:64581:64581 [3] NCCL INFO Using network IB +r14i7n7:81282:81282 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.145<0> [1]ib1:10.149.7.145<0> +r14i7n4:5389:5389 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.142<0> +r14i7n4:5389:5389 [1] NCCL INFO Using network IB +r14i7n7:81282:81282 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i5n2:15798:15798 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.4.37<0> [1]ib1:10.149.4.37<0> +r8i2n1:41759:41759 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.214<0> [1]ib1:10.149.0.214<0> +r14i7n6:27448:27448 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.144<0> +r14i7n6:27448:27448 [0] NCCL INFO Using network IB +r8i5n2:15798:15798 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i2n1:41759:41759 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n4:5391:5391 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.142<0> [1]ib1:10.149.7.142<0> +r8i3n6:76355:76355 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.228<0> [1]ib1:10.149.0.228<0> +r8i3n6:76353:76353 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.228<0> [1]ib1:10.149.0.228<0> +r14i7n7:81282:81282 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.145<0> +r14i7n7:81282:81282 [3] NCCL INFO Using network IB +r14i7n6:27450:27450 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.144<0> [1]ib1:10.149.7.144<0> +r14i7n4:5391:5391 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n1:29727:29727 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.152<0> [1]ib1:10.149.7.152<0> +r14i7n6:27450:27450 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n6:76355:76355 [3] 
NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n6:76353:76353 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i5n2:15798:15798 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.4.37<0> +r8i5n2:15798:15798 [0] NCCL INFO Using network IB +r14i7n1:29727:29727 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i2n1:41759:41759 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.214<0> +r8i2n1:41759:41759 [2] NCCL INFO Using network IB +r14i7n5:42531:42531 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.143<0> [1]ib1:10.149.7.143<0> +r14i7n5:42529:42529 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.143<0> [1]ib1:10.149.7.143<0> +r14i7n4:5391:5391 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.142<0> +r14i7n4:5391:5391 [3] NCCL INFO Using network IB +r14i7n5:42531:42531 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n5:42529:42529 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n7:45558:45558 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.211<0> [1]ib1:10.149.0.211<0> +r14i7n6:27450:27450 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.144<0> +r14i7n6:27450:27450 [2] NCCL INFO Using network IB +r8i2n1:41758:41758 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.214<0> [1]ib1:10.149.0.214<0> +r8i3n6:76355:76355 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.228<0> +r8i3n6:76355:76355 [3] NCCL INFO Using network IB +r14i7n6:27451:27451 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.144<0> [1]ib1:10.149.7.144<0> +r8i3n6:76353:76353 [1] NCCL INFO NET/IB : 
Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.228<0> +r8i3n6:76353:76353 [1] NCCL INFO Using network IB +r8i1n7:45558:45558 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n1:29727:29727 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.152<0> +r14i7n1:29727:29727 [2] NCCL INFO Using network IB +r8i2n1:41758:41758 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n6:27451:27451 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n1:21964:21964 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.205<0> [1]ib1:10.149.0.205<0> +r14i7n5:42531:42531 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.143<0> +r14i7n5:42531:42531 [3] NCCL INFO Using network IB +r8i3n1:64580:64580 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.223<0> [1]ib1:10.149.0.223<0> +r14i7n5:42529:42529 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.143<0> +r14i7n5:42529:42529 [1] NCCL INFO Using network IB +r8i1n1:21964:21964 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n7:81281:81281 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.145<0> [1]ib1:10.149.7.145<0> +r8i2n1:41758:41758 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.214<0> +r8i2n1:41758:41758 [1] NCCL INFO Using network IB +r8i3n1:64580:64580 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n6:27451:27451 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.144<0> +r14i7n6:27451:27451 [3] NCCL INFO Using network IB +r8i1n7:45558:45558 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB 
[1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.211<0> +r8i1n7:45558:45558 [3] NCCL INFO Using network IB +r14i7n7:81281:81281 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n5:42530:42530 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.143<0> [1]ib1:10.149.7.143<0> +r14i7n0:25489:25489 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.151<0> [1]ib1:10.149.7.151<0> +r14i7n5:42530:42530 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n1:21964:21964 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.205<0> +r8i1n1:21964:21964 [1] NCCL INFO Using network IB +r14i7n0:25489:25489 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n1:64580:64580 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.223<0> +r8i3n1:64580:64580 [2] NCCL INFO Using network IB +r14i7n4:5390:5390 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.142<0> [1]ib1:10.149.7.142<0> +r8i3n1:64578:64578 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.223<0> [1]ib1:10.149.0.223<0> +r14i7n7:81281:81281 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.145<0> +r14i7n7:81281:81281 [2] NCCL INFO Using network IB +r14i7n4:5390:5390 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n1:64578:64578 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n5:42530:42530 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.143<0> +r7i6n2:60566:60566 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.95<0> [1]ib1:10.149.0.95<0> +r14i7n5:42530:42530 [2] NCCL INFO Using network IB +r14i7n0:25489:25489 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB 
[2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.151<0> +r14i7n0:25489:25489 [2] NCCL INFO Using network IB +r8i2n1:41760:41760 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.214<0> [1]ib1:10.149.0.214<0> +r7i6n2:60566:60566 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n1:64578:64578 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.223<0> +r8i3n1:64578:64578 [0] NCCL INFO Using network IB +r8i2n1:41760:41760 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n1:29728:29728 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.152<0> [1]ib1:10.149.7.152<0> +r14i7n4:5390:5390 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.142<0> +r14i7n4:5390:5390 [2] NCCL INFO Using network IB +r14i7n1:29726:29726 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.152<0> [1]ib1:10.149.7.152<0> +r14i7n1:29728:29728 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:60565:60565 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.95<0> [1]ib1:10.149.0.95<0> +r14i7n1:29726:29726 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:60566:60566 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:60566:60566 [2] NCCL INFO Using network IB +r8i1n7:45555:45555 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.211<0> [1]ib1:10.149.0.211<0> +r7i6n2:60565:60565 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i2n1:41760:41760 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.214<0> +r8i2n1:41760:41760 [3] NCCL INFO Using network IB +r8i1n7:45555:45555 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal 
implementation +r14i7n1:29728:29728 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.152<0> +r14i7n1:29728:29728 [3] NCCL INFO Using network IB +r14i7n1:29726:29726 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.152<0> +r14i7n1:29726:29726 [1] NCCL INFO Using network IB +r7i6n2:60565:60565 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:60565:60565 [1] NCCL INFO Using network IB +r8i1n7:45555:45555 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.211<0> +r8i1n7:45555:45555 [0] NCCL INFO Using network IB +r14i7n2:39448:39448 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.153<0> [1]ib1:10.149.7.153<0> +r14i7n2:39448:39448 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n2:60567:60567 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.95<0> [1]ib1:10.149.0.95<0> +r14i7n0:25488:25488 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.151<0> [1]ib1:10.149.7.151<0> +r8i3n6:76354:76354 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.228<0> [1]ib1:10.149.0.228<0> +r7i6n2:60567:60567 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n0:25488:25488 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i3n6:76354:76354 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n2:39448:39448 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.153<0> +r14i7n2:39448:39448 [3] NCCL INFO Using network IB +r14i7n3:66128:66128 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.154<0> [1]ib1:10.149.7.154<0> +r14i7n3:66128:66128 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation 
+r7i6n2:60567:60567 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.95<0> +r7i6n2:60567:60567 [3] NCCL INFO Using network IB +r14i7n0:25488:25488 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.151<0> +r14i7n0:25488:25488 [1] NCCL INFO Using network IB +r8i3n6:76354:76354 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.228<0> +r14i7n4:5388:5388 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.142<0> [1]ib1:10.149.7.142<0> +r8i3n6:76354:76354 [2] NCCL INFO Using network IB +r14i7n5:42528:42528 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.143<0> [1]ib1:10.149.7.143<0> +r14i7n4:5388:5388 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n3:66128:66128 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.154<0> +r14i7n3:66128:66128 [2] NCCL INFO Using network IB +r14i7n5:42528:42528 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n7:45557:45557 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.211<0> [1]ib1:10.149.0.211<0> +r8i1n7:45557:45557 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n4:5388:5388 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.142<0> +r14i7n4:5388:5388 [0] NCCL INFO Using network IB +r14i7n2:39445:39445 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.153<0> [1]ib1:10.149.7.153<0> +r14i7n5:42528:42528 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.143<0> +r14i7n5:42528:42528 [0] NCCL INFO Using network IB +r14i7n2:39445:39445 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n7:45557:45557 [2] NCCL INFO NET/IB : 
Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.211<0> +r8i1n7:45557:45557 [2] NCCL INFO Using network IB +r14i7n2:39445:39445 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.153<0> +r14i7n2:39445:39445 [0] NCCL INFO Using network IB +r14i7n2:39447:39447 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.153<0> [1]ib1:10.149.7.153<0> +r14i7n2:39447:39447 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i1n7:45556:45556 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.211<0> [1]ib1:10.149.0.211<0> +r8i1n7:45556:45556 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n2:39447:39447 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.153<0> +r14i7n2:39447:39447 [2] NCCL INFO Using network IB +r8i1n7:45556:45556 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.211<0> +r8i1n7:45556:45556 [1] NCCL INFO Using network IB +r14i7n1:29725:29725 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.7.152<0> [1]ib1:10.149.7.152<0> +r14i7n1:29725:29725 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r14i7n1:29725:29725 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.7.152<0> +r14i7n1:29725:29725 [0] NCCL INFO Using network IB +r7i4n1:77232:77232 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:77232:77232 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:77233:77233 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:77233:77233 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:77232:77232 [1] NCCL INFO NET/IB : Using 
[0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:77232:77232 [1] NCCL INFO Using network IB +r7i4n1:77233:77233 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:77233:77233 [2] NCCL INFO Using network IB +r8i5n2:15799:15799 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.4.37<0> [1]ib1:10.149.4.37<0> +r8i5n2:15799:15799 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i5n2:15799:15799 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.4.37<0> +r8i5n2:15799:15799 [1] NCCL INFO Using network IB +r8i3n6:76354:76402 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n6:76355:76396 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n6:76355:76396 [3] NCCL INFO Trees [0] -1/-1/-1->27->26|26->27->-1/-1/-1 [1] 24/-1/-1->27->26|26->27->24/-1/-1 [2] -1/-1/-1->27->26|26->27->-1/-1/-1 [3] 24/-1/-1->27->26|26->27->24/-1/-1 +r8i3n6:76354:76402 [2] NCCL INFO Trees [0] 27/-1/-1->26->25|25->26->27/-1/-1 [1] 27/22/30->26->18|18->26->27/22/30 [2] 27/-1/-1->26->25|25->26->27/-1/-1 [3] 27/-1/-1->26->30|30->26->27/-1/-1 +r8i3n6:76355:76396 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i3n6:76354:76402 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i3n6:76353:76397 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n6:76352:76387 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n6:76353:76397 [1] NCCL INFO Trees [0] 26/-1/-1->25->24|24->25->26/-1/-1 [1] -1/-1/-1->25->24|24->25->-1/-1/-1 [2] 26/-1/-1->25->24|24->25->26/-1/-1 [3] -1/-1/-1->25->24|24->25->-1/-1/-1 +r8i3n6:76352:76387 [0] NCCL INFO Trees [0] 25/20/28->24->16|16->24->25/20/28 [1] 25/-1/-1->24->27|27->24->25/-1/-1 [2] 25/-1/-1->24->28|28->24->25/-1/-1 [3] 25/-1/-1->24->27|27->24->25/-1/-1 +r8i3n6:76353:76397 [1] NCCL INFO Setting affinity for 
GPU 1 to 0fffff +r8i3n6:76352:76387 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i3n1:64581:64618 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n1:64581:64618 [3] NCCL INFO Trees [0] -1/-1/-1->23->22|22->23->-1/-1/-1 [1] 20/-1/-1->23->22|22->23->20/-1/-1 [2] -1/-1/-1->23->22|22->23->-1/-1/-1 [3] 20/-1/-1->23->22|22->23->20/-1/-1 +r8i3n1:64580:64623 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41760:41808 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41758:41803 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41760:41808 [3] NCCL INFO Trees [0] -1/-1/-1->19->18|18->19->-1/-1/-1 [1] 16/-1/-1->19->18|18->19->16/-1/-1 [2] -1/-1/-1->19->18|18->19->-1/-1/-1 [3] 16/-1/-1->19->18|18->19->16/-1/-1 +r8i3n1:64579:64613 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n1:64580:64623 [2] NCCL INFO Trees [0] 23/-1/-1->22->21|21->22->23/-1/-1 [1] 23/-1/-1->22->26|26->22->23/-1/-1 [2] 23/-1/-1->22->21|21->22->23/-1/-1 [3] 23/14/30->22->38|38->22->23/14/30 +r8i3n1:64581:64618 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i3n1:64579:64613 [1] NCCL INFO Trees [0] 22/-1/-1->21->20|20->21->22/-1/-1 [1] -1/-1/-1->21->20|20->21->-1/-1/-1 [2] 22/-1/-1->21->20|20->21->22/-1/-1 [3] -1/-1/-1->21->20|20->21->-1/-1/-1 +r8i3n1:64580:64623 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i3n1:64579:64613 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i3n1:64578:64628 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i3n1:64578:64628 [0] NCCL INFO Trees [0] 21/-1/-1->20->24|24->20->21/-1/-1 [1] 21/-1/-1->20->23|23->20->21/-1/-1 [2] 21/12/28->20->36|36->20->21/12/28 [3] 21/-1/-1->20->23|23->20->21/-1/-1 +r8i3n1:64578:64628 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i1n7:45558:45590 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i1n7:45558:45590 [3] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 
12/-1/-1->15->14|14->15->12/-1/-1 [2] -1/-1/-1->15->14|14->15->-1/-1/-1 [3] 12/-1/-1->15->14|14->15->12/-1/-1 +r8i1n1:21966:21998 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41759:41798 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41758:41803 [1] NCCL INFO Trees [0] 18/-1/-1->17->16|16->17->18/-1/-1 [1] -1/-1/-1->17->16|16->17->-1/-1/-1 [2] 18/-1/-1->17->16|16->17->18/-1/-1 [3] -1/-1/-1->17->16|16->17->-1/-1/-1 +r8i2n1:41759:41798 [2] NCCL INFO Trees [0] 19/-1/-1->18->17|17->18->19/-1/-1 [1] 19/10/26->18->34|34->18->19/10/26 [2] 19/-1/-1->18->17|17->18->19/-1/-1 [3] 19/-1/-1->18->14|14->18->19/-1/-1 +r8i2n1:41760:41808 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i2n1:41757:41793 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41758:41803 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i2n1:41759:41798 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i1n7:45557:45600 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i1n7:45558:45590 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i1n7:45557:45600 [2] NCCL INFO Trees [0] 15/-1/-1->14->13|13->14->15/-1/-1 [1] 15/-1/-1->14->10|10->14->15/-1/-1 [2] 15/-1/-1->14->13|13->14->15/-1/-1 [3] 15/10/18->14->22|22->14->15/10/18 +r8i1n7:45555:45595 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i1n7:45556:45605 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i2n1:41757:41793 [0] NCCL INFO Trees [0] 17/8/24->16->32|32->16->17/8/24 [1] 17/-1/-1->16->19|19->16->17/-1/-1 [2] 17/-1/-1->16->12|12->16->17/-1/-1 [3] 17/-1/-1->16->19|19->16->17/-1/-1 +r8i2n1:41757:41793 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i1n7:45557:45600 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i1n7:45555:45595 [0] NCCL INFO Trees [0] 13/-1/-1->12->8|8->12->13/-1/-1 [1] 13/-1/-1->12->15|15->12->13/-1/-1 [2] 13/8/16->12->20|20->12->13/8/16 [3] 13/-1/-1->12->15|15->12->13/-1/-1 +r8i1n7:45556:45605 
[1] NCCL INFO Trees [0] 14/-1/-1->13->12|12->13->14/-1/-1 [1] -1/-1/-1->13->12|12->13->-1/-1/-1 [2] 14/-1/-1->13->12|12->13->14/-1/-1 [3] -1/-1/-1->13->12|12->13->-1/-1/-1 +r8i1n7:45555:45595 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i1n7:45556:45605 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i1n1:21965:22008 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i1n1:21966:21998 [3] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 8/-1/-1->11->10|10->11->8/-1/-1 [2] -1/-1/-1->11->10|10->11->-1/-1/-1 [3] 8/-1/-1->11->10|10->11->8/-1/-1 +r8i1n1:21965:22008 [2] NCCL INFO Trees [0] 11/-1/-1->10->9|9->10->11/-1/-1 [1] 11/6/14->10->18|18->10->11/6/14 [2] 11/-1/-1->10->9|9->10->11/-1/-1 [3] 11/-1/-1->10->14|14->10->11/-1/-1 +r8i5n2:15800:15838 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i5n2:15799:15848 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i5n2:15800:15838 [2] NCCL INFO Trees [0] 31/-1/-1->30->29|29->30->31/-1/-1 [1] 31/-1/-1->30->26|26->30->31/-1/-1 [2] 31/-1/-1->30->29|29->30->31/-1/-1 [3] 31/26/34->30->22|22->30->31/26/34 +r8i5n2:15799:15848 [1] NCCL INFO Trees [0] 30/-1/-1->29->28|28->29->30/-1/-1 [1] -1/-1/-1->29->28|28->29->-1/-1/-1 [2] 30/-1/-1->29->28|28->29->30/-1/-1 [3] -1/-1/-1->29->28|28->29->-1/-1/-1 +r8i1n1:21966:21998 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i1n1:21965:22008 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i1n1:21964:22013 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60567:60614 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60566:60604 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60567:60614 [3] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 4/-1/-1->7->6|6->7->4/-1/-1 [2] -1/-1/-1->7->6|6->7->-1/-1/-1 [3] 4/-1/-1->7->6|6->7->4/-1/-1 +r8i5n2:15800:15838 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i5n2:15799:15848 [1] NCCL INFO Setting affinity for GPU 
1 to 0fffff +r8i5n2:15798:15843 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i5n2:15798:15843 [0] NCCL INFO Trees [0] 29/-1/-1->28->24|24->28->29/-1/-1 [1] 29/-1/-1->28->31|31->28->29/-1/-1 [2] 29/24/32->28->20|20->28->29/24/32 [3] 29/-1/-1->28->31|31->28->29/-1/-1 +r14i7n1:29725:29775 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n1:29726:29770 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n1:29727:29760 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n0:25488:25537 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n0:25489:25532 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n0:25487:25522 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n0:25490:25527 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i1n1:21964:22013 [1] NCCL INFO Trees [0] 10/-1/-1->9->8|8->9->10/-1/-1 [1] -1/-1/-1->9->8|8->9->-1/-1/-1 [2] 10/-1/-1->9->8|8->9->10/-1/-1 [3] -1/-1/-1->9->8|8->9->-1/-1/-1 +r8i5n2:15798:15843 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i5n2:15801:15833 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i5n2:15801:15833 [3] NCCL INFO Trees [0] -1/-1/-1->31->30|30->31->-1/-1/-1 [1] 28/-1/-1->31->30|30->31->28/-1/-1 [2] -1/-1/-1->31->30|30->31->-1/-1/-1 [3] 28/-1/-1->31->30|30->31->28/-1/-1 +r8i5n2:15801:15833 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i1n1:21964:22013 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n1:77234:77296 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n1:77231:77291 [0] NCCL INFO Channel 00/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r7i4n1:77234:77296 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->2|2->3->0/-1/-1 [2] -1/-1/-1->3->2|2->3->-1/-1/-1 [3] 0/-1/-1->3->2|2->3->0/-1/-1 +r7i4n1:77231:77291 [0] NCCL INFO Channel 01/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r7i6n2:60566:60604 [2] NCCL INFO Trees [0] 
7/-1/-1->6->5|5->6->7/-1/-1 [1] 7/-1/-1->6->10|10->6->7/-1/-1 [2] 7/-1/-1->6->5|5->6->7/-1/-1 [3] 7/38/-1->6->-1|-1->6->7/38/-1 +r7i6n2:60565:60609 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60567:60614 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n2:60566:60604 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n2:60565:60609 [1] NCCL INFO Trees [0] 6/-1/-1->5->4|4->5->6/-1/-1 [1] -1/-1/-1->5->4|4->5->-1/-1/-1 [2] 6/-1/-1->5->4|4->5->6/-1/-1 [3] -1/-1/-1->5->4|4->5->-1/-1/-1 +r7i6n2:60564:60599 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60565:60609 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n5:42531:42567 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n5:42530:42573 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n1:29725:29775 [0] NCCL INFO Trees [0] 37/-1/-1->36->40|40->36->37/-1/-1 [1] 37/-1/-1->36->39|39->36->37/-1/-1 [2] 37/20/52->36->4|4->36->37/20/52 [3] 37/-1/-1->36->39|39->36->37/-1/-1 +r14i7n1:29726:29770 [1] NCCL INFO Trees [0] 38/-1/-1->37->36|36->37->38/-1/-1 [1] -1/-1/-1->37->36|36->37->-1/-1/-1 [2] 38/-1/-1->37->36|36->37->38/-1/-1 [3] -1/-1/-1->37->36|36->37->-1/-1/-1 +r14i7n1:29727:29760 [2] NCCL INFO Trees [0] 39/-1/-1->38->37|37->38->39/-1/-1 [1] 39/-1/-1->38->42|42->38->39/-1/-1 [2] 39/-1/-1->38->37|37->38->39/-1/-1 [3] 39/22/54->38->6|6->38->39/22/54 +r14i7n0:25488:25537 [1] NCCL INFO Trees [0] 34/-1/-1->33->32|32->33->34/-1/-1 [1] -1/-1/-1->33->32|32->33->-1/-1/-1 [2] 34/-1/-1->33->32|32->33->34/-1/-1 [3] -1/-1/-1->33->32|32->33->-1/-1/-1 +r14i7n0:25487:25522 [0] NCCL INFO Trees [0] 33/16/48->32->0|0->32->33/16/48 [1] 33/-1/-1->32->35|35->32->33/-1/-1 [2] 33/-1/-1->32->28|28->32->33/-1/-1 [3] 33/-1/-1->32->35|35->32->33/-1/-1 +r14i7n0:25489:25532 [2] NCCL INFO Trees [0] 35/-1/-1->34->33|33->34->35/-1/-1 [1] 35/18/50->34->2|2->34->35/18/50 [2] 35/-1/-1->34->33|33->34->35/-1/-1 [3] 35/-1/-1->34->30|30->34->35/-1/-1 
+r14i7n0:25490:25527 [3] NCCL INFO Trees [0] -1/-1/-1->35->34|34->35->-1/-1/-1 [1] 32/-1/-1->35->34|34->35->32/-1/-1 [2] -1/-1/-1->35->34|34->35->-1/-1/-1 [3] 32/-1/-1->35->34|34->35->32/-1/-1 +r8i1n1:21963:22007 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n2:60564:60599 [0] NCCL INFO Trees [0] 5/-1/-1->4->8|8->4->5/-1/-1 [1] 5/-1/-1->4->7|7->4->5/-1/-1 [2] 5/36/-1->4->-1|-1->4->5/36/-1 [3] 5/-1/-1->4->7|7->4->5/-1/-1 +r7i6n2:60564:60599 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n2:39447:39496 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n2:39445:39491 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n2:39446:39481 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n2:39445:39491 [0] NCCL INFO Trees [0] 41/36/44->40->48|48->40->41/36/44 [1] 41/-1/-1->40->43|43->40->41/-1/-1 [2] 41/-1/-1->40->44|44->40->41/-1/-1 [3] 41/-1/-1->40->43|43->40->41/-1/-1 +r14i7n1:29728:29769 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n1:29726:29770 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n1:29727:29760 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n1:29725:29775 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n6:27449:27483 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n6:27450:27493 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n6:27449:27483 [1] NCCL INFO Trees [0] 58/-1/-1->57->56|56->57->58/-1/-1 [1] -1/-1/-1->57->56|56->57->-1/-1/-1 [2] 58/-1/-1->57->56|56->57->58/-1/-1 [3] -1/-1/-1->57->56|56->57->-1/-1/-1 +r14i7n6:27450:27493 [2] NCCL INFO Trees [0] 59/-1/-1->58->57|57->58->59/-1/-1 [1] 59/54/62->58->50|50->58->59/54/62 [2] 59/-1/-1->58->57|57->58->59/-1/-1 [3] 59/-1/-1->58->62|62->58->59/-1/-1 +r14i7n0:25489:25532 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n0:25488:25537 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n0:25490:25527 [3] NCCL INFO Setting affinity for GPU 3 to 
ff,fff00000 +r14i7n0:25487:25522 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i1n1:21963:22007 [0] NCCL INFO Trees [0] 9/4/12->8->16|16->8->9/4/12 [1] 9/-1/-1->8->11|11->8->9/-1/-1 [2] 9/-1/-1->8->12|12->8->9/-1/-1 [3] 9/-1/-1->8->11|11->8->9/-1/-1 +r14i7n4:5388:5438 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n4:5390:5433 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n4:5391:5428 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n4:5389:5423 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n4:5388:5438 [0] NCCL INFO Trees [0] 49/40/56->48->32|32->48->49/40/56 [1] 49/-1/-1->48->51|51->48->49/-1/-1 [2] 49/-1/-1->48->44|44->48->49/-1/-1 [3] 49/-1/-1->48->51|51->48->49/-1/-1 +r14i7n4:5390:5433 [2] NCCL INFO Trees [0] 51/-1/-1->50->49|49->50->51/-1/-1 [1] 51/42/58->50->34|34->50->51/42/58 [2] 51/-1/-1->50->49|49->50->51/-1/-1 [3] 51/-1/-1->50->46|46->50->51/-1/-1 +r14i7n1:29728:29769 [3] NCCL INFO Trees [0] -1/-1/-1->39->38|38->39->-1/-1/-1 [1] 36/-1/-1->39->38|38->39->36/-1/-1 [2] -1/-1/-1->39->38|38->39->-1/-1/-1 [3] 36/-1/-1->39->38|38->39->36/-1/-1 +r14i7n1:29728:29769 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n7:81281:81329 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n7:81282:81324 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n7:81279:81314 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n7:81280:81319 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n7:81281:81329 [2] NCCL INFO Trees [0] 63/-1/-1->62->61|61->62->63/-1/-1 [1] 63/-1/-1->62->58|58->62->63/-1/-1 [2] 63/-1/-1->62->61|61->62->63/-1/-1 [3] 63/58/2->62->54|54->62->63/58/2 +r14i7n7:81279:81314 [0] NCCL INFO Trees [0] 61/-1/-1->60->56|56->60->61/-1/-1 [1] 61/-1/-1->60->63|63->60->61/-1/-1 [2] 61/56/0->60->52|52->60->61/56/0 [3] 61/-1/-1->60->63|63->60->61/-1/-1 +r14i7n7:81282:81324 [3] NCCL INFO Trees [0] -1/-1/-1->63->62|62->63->-1/-1/-1 [1] 
60/-1/-1->63->62|62->63->60/-1/-1 [2] -1/-1/-1->63->62|62->63->-1/-1/-1 [3] 60/-1/-1->63->62|62->63->60/-1/-1 +r8i1n1:21963:22007 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n1:77233:77306 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n1:77231:77291 [0] NCCL INFO Channel 02/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r7i4n1:77232:77301 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n1:77234:77296 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n1:77233:77306 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] 3/34/-1->2->-1|-1->2->3/34/-1 [2] 3/-1/-1->2->1|1->2->3/-1/-1 [3] 3/-1/-1->2->62|62->2->3/-1/-1 +r7i4n1:77231:77291 [0] NCCL INFO Channel 03/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r7i4n1:77232:77301 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] -1/-1/-1->1->0|0->1->-1/-1/-1 [2] 2/-1/-1->1->0|0->1->2/-1/-1 [3] -1/-1/-1->1->0|0->1->-1/-1/-1 +r14i7n5:42529:42568 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n5:42530:42573 [2] NCCL INFO Trees [0] 55/-1/-1->54->53|53->54->55/-1/-1 [1] 55/-1/-1->54->58|58->54->55/-1/-1 [2] 55/-1/-1->54->53|53->54->55/-1/-1 [3] 55/46/62->54->38|38->54->55/46/62 +r14i7n5:42531:42567 [3] NCCL INFO Trees [0] -1/-1/-1->55->54|54->55->-1/-1/-1 [1] 52/-1/-1->55->54|54->55->52/-1/-1 [2] -1/-1/-1->55->54|54->55->-1/-1/-1 [3] 52/-1/-1->55->54|54->55->52/-1/-1 +r14i7n5:42528:42578 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n3:66126:66171 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n3:66127:66166 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n3:66129:66161 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n3:66128:66176 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n3:66126:66171 [0] NCCL INFO Trees [0] 45/-1/-1->44->40|40->44->45/-1/-1 [1] 45/-1/-1->44->47|47->44->45/-1/-1 [2] 45/40/48->44->52|52->44->45/40/48 [3] 
45/-1/-1->44->47|47->44->45/-1/-1 +r14i7n3:66127:66166 [1] NCCL INFO Trees [0] 46/-1/-1->45->44|44->45->46/-1/-1 [1] -1/-1/-1->45->44|44->45->-1/-1/-1 [2] 46/-1/-1->45->44|44->45->46/-1/-1 [3] -1/-1/-1->45->44|44->45->-1/-1/-1 +r14i7n3:66129:66161 [3] NCCL INFO Trees [0] -1/-1/-1->47->46|46->47->-1/-1/-1 [1] 44/-1/-1->47->46|46->47->44/-1/-1 [2] -1/-1/-1->47->46|46->47->-1/-1/-1 [3] 44/-1/-1->47->46|46->47->44/-1/-1 +r7i4n1:77233:77306 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n1:77232:77301 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n4:5391:5428 [3] NCCL INFO Trees [0] -1/-1/-1->51->50|50->51->-1/-1/-1 [1] 48/-1/-1->51->50|50->51->48/-1/-1 [2] -1/-1/-1->51->50|50->51->-1/-1/-1 [3] 48/-1/-1->51->50|50->51->48/-1/-1 +r14i7n4:5389:5423 [1] NCCL INFO Trees [0] 50/-1/-1->49->48|48->49->50/-1/-1 [1] -1/-1/-1->49->48|48->49->-1/-1/-1 [2] 50/-1/-1->49->48|48->49->50/-1/-1 [3] -1/-1/-1->49->48|48->49->-1/-1/-1 +r14i7n4:5388:5438 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n4:5391:5428 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n4:5389:5423 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n4:5390:5433 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n5:42529:42568 [1] NCCL INFO Trees [0] 54/-1/-1->53->52|52->53->54/-1/-1 [1] -1/-1/-1->53->52|52->53->-1/-1/-1 [2] 54/-1/-1->53->52|52->53->54/-1/-1 [3] -1/-1/-1->53->52|52->53->-1/-1/-1 +r14i7n5:42530:42573 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n5:42531:42567 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n5:42528:42578 [0] NCCL INFO Trees [0] 53/-1/-1->52->56|56->52->53/-1/-1 [1] 53/-1/-1->52->55|55->52->53/-1/-1 [2] 53/44/60->52->36|36->52->53/44/60 [3] 53/-1/-1->52->55|55->52->53/-1/-1 +r14i7n5:42529:42568 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n6:27451:27498 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n6:27448:27488 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 
| 8/8/64 +r14i7n6:27451:27498 [3] NCCL INFO Trees [0] -1/-1/-1->59->58|58->59->-1/-1/-1 [1] 56/-1/-1->59->58|58->59->56/-1/-1 [2] -1/-1/-1->59->58|58->59->-1/-1/-1 [3] 56/-1/-1->59->58|58->59->56/-1/-1 +r14i7n6:27448:27488 [0] NCCL INFO Trees [0] 57/52/60->56->48|48->56->57/52/60 [1] 57/-1/-1->56->59|59->56->57/-1/-1 [2] 57/-1/-1->56->60|60->56->57/-1/-1 [3] 57/-1/-1->56->59|59->56->57/-1/-1 +r14i7n6:27449:27483 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n6:27450:27493 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n6:27448:27488 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n1:77231:77291 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n5:42528:42578 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n6:27451:27498 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n3:66128:66176 [2] NCCL INFO Trees [0] 47/-1/-1->46->45|45->46->47/-1/-1 [1] 47/-1/-1->46->42|42->46->47/-1/-1 [2] 47/-1/-1->46->45|45->46->47/-1/-1 [3] 47/42/50->46->54|54->46->47/42/50 +r7i4n1:77231:77291 [0] NCCL INFO Trees [0] 1/32/-1->0->-1|-1->0->1/32/-1 [1] 1/-1/-1->0->3|3->0->1/-1/-1 [2] 1/-1/-1->0->60|60->0->1/-1/-1 [3] 1/-1/-1->0->3|3->0->1/-1/-1 +r7i4n1:77231:77291 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n3:66126:66171 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n3:66129:66161 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n3:66128:66176 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n3:66127:66166 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n7:81280:81319 [1] NCCL INFO Trees [0] 62/-1/-1->61->60|60->61->62/-1/-1 [1] -1/-1/-1->61->60|60->61->-1/-1/-1 [2] 62/-1/-1->61->60|60->61->62/-1/-1 [3] -1/-1/-1->61->60|60->61->-1/-1/-1 +r14i7n7:81282:81324 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n7:81279:81314 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n7:81281:81329 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 
+r14i7n7:81280:81319 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n2:39447:39496 [2] NCCL INFO Trees [0] 43/-1/-1->42->41|41->42->43/-1/-1 [1] 43/38/46->42->50|50->42->43/38/46 [2] 43/-1/-1->42->41|41->42->43/-1/-1 [3] 43/-1/-1->42->46|46->42->43/-1/-1 +r14i7n2:39448:39486 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r14i7n2:39446:39481 [1] NCCL INFO Trees [0] 42/-1/-1->41->40|40->41->42/-1/-1 [1] -1/-1/-1->41->40|40->41->-1/-1/-1 [2] 42/-1/-1->41->40|40->41->42/-1/-1 [3] -1/-1/-1->41->40|40->41->-1/-1/-1 +r14i7n2:39448:39486 [3] NCCL INFO Trees [0] -1/-1/-1->43->42|42->43->-1/-1/-1 [1] 40/-1/-1->43->42|42->43->40/-1/-1 [2] -1/-1/-1->43->42|42->43->-1/-1/-1 [3] 40/-1/-1->43->42|42->43->40/-1/-1 +r14i7n2:39447:39496 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r14i7n2:39445:39491 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r14i7n2:39446:39481 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r14i7n2:39448:39486 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r14i7n5:42529:42568 [1] NCCL INFO Channel 00 : 53[1c000] -> 54[88000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO Channel 00 : 21[1c000] -> 22[88000] via P2P/IPC +r8i2n1:41758:41803 [1] NCCL INFO Channel 00 : 17[1c000] -> 18[88000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO Channel 00 : 29[1c000] -> 30[88000] via P2P/IPC +r8i1n7:45556:45605 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[88000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r14i7n6:27449:27483 [1] NCCL INFO Channel 00 : 57[1c000] -> 58[88000] via P2P/IPC +r14i7n1:29726:29770 [1] NCCL INFO Channel 00 : 37[1c000] -> 38[88000] via P2P/IPC +r14i7n0:25488:25537 [1] NCCL INFO Channel 00 : 33[1c000] -> 34[88000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 00 : 30[88000] -> 31[8a000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[88000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 00 : 25[1c000] -> 26[88000] 
via P2P/IPC +r8i1n1:21964:22013 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[88000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 00 : 26[88000] -> 27[8a000] via P2P/IPC +r14i7n7:81280:81319 [1] NCCL INFO Channel 00 : 61[1c000] -> 62[88000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO Channel 00 : 45[1c000] -> 46[88000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 00 : 58[88000] -> 59[8a000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 00 : 41[1c000] -> 42[88000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 00 : 62[88000] -> 63[8a000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 00 : 18[88000] -> 19[8a000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO Channel 00 : 49[1c000] -> 50[88000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 00 : 50[88000] -> 51[8a000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 00 : 42[88000] -> 43[8a000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 00 : 34[88000] -> 35[8a000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 00 : 38[88000] -> 39[8a000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 00 : 6[88000] -> 7[8a000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 00 : 46[88000] -> 47[8a000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 00 : 22[88000] -> 23[8a000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 00 : 10[88000] -> 11[8a000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 00 : 54[88000] -> 55[8a000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 00 : 14[88000] -> 15[8a000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 00 : 30[88000] -> 29[1c000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 00 : 58[88000] -> 57[1c000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 00 : 26[88000] -> 25[1c000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 00 : 18[88000] -> 17[1c000] via P2P/IPC 
+r14i7n7:81281:81329 [2] NCCL INFO Channel 00 : 62[88000] -> 61[1c000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 00 : 42[88000] -> 41[1c000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 00 : 50[88000] -> 49[1c000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 00 : 46[88000] -> 45[1c000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 00 : 6[88000] -> 5[1c000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 00 : 34[88000] -> 33[1c000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 00 : 38[88000] -> 37[1c000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 00 : 22[88000] -> 21[1c000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 00 : 10[88000] -> 9[1c000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 00 : 54[88000] -> 53[1c000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 00 : 14[88000] -> 13[1c000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 00 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 00 : 43[8a000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 00 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i6n2:60564:60599 [0] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 
+r8i5n2:15798:15843 [0] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 16[1a000] -> 17[1c000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n1:77231:77291 [0] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1c000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 00 : 60[1a000] -> 61[1c000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 32[1a000] -> 33[1c000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1c000] via P2P/IPC +r8i5n2:15798:15843 [0] NCCL INFO Channel 00 : 28[1a000] -> 29[1c000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r7i6n2:60564:60599 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1c000] via P2P/IPC +r14i7n1:29725:29775 [0] NCCL INFO Channel 00 : 36[1a000] -> 37[1c000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 40[1a000] -> 41[1c000] via P2P/IPC +r8i5n2:15801:15833 [3] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r7i4n1:77234:77296 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [send] via NET/IB/2 +r8i3n1:64578:64628 [0] NCCL INFO Channel 00 : 20[1a000] -> 21[1c000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 00 : 44[1a000] -> 45[1c000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 00 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r14i7n7:81282:81324 [3] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 56[1a000] -> 57[1c000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 
+r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 24[1a000] -> 25[1c000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 00 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r14i7n0:25490:25527 [3] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r14i7n4:5391:5428 [3] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r7i6n2:60567:60614 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r8i1n7:45558:45590 [3] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r8i1n1:21966:21998 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r8i3n1:64581:64618 [3] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r14i7n3:66129:66161 [3] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r14i7n1:29728:29769 [3] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 48[1a000] -> 49[1c000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r8i2n1:41758:41803 [1] NCCL INFO Channel 00 : 17[1c000] -> 16[1a000] via P2P/IPC +r14i7n5:42529:42568 [1] NCCL INFO Channel 00 : 53[1c000] -> 52[1a000] via P2P/IPC +r14i7n0:25488:25537 [1] NCCL INFO Channel 00 : 33[1c000] -> 32[1a000] via P2P/IPC +r8i1n1:21964:22013 [1] NCCL INFO Channel 00 : 9[1c000] -> 8[1a000] via P2P/IPC +r14i7n7:81280:81319 [1] NCCL INFO Channel 00 : 61[1c000] -> 60[1a000] via P2P/IPC +r8i1n7:45556:45605 [1] NCCL INFO Channel 00 : 13[1c000] -> 12[1a000] via P2P/IPC +r14i7n1:29726:29770 [1] NCCL INFO Channel 00 : 37[1c000] -> 36[1a000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO Channel 00 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO Channel 00 : 5[1c000] -> 4[1a000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO Channel 00 : 45[1c000] -> 44[1a000] via P2P/IPC +r14i7n6:27449:27483 
[1] NCCL INFO Channel 00 : 57[1c000] -> 56[1a000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 00 : 41[1c000] -> 40[1a000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO Channel 00 : 21[1c000] -> 20[1a000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 00 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i4n1:77234:77296 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r8i5n2:15801:15833 [3] NCCL INFO Channel 00 : 31[8a000] -> 30[88000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 00 : 43[8a000] -> 42[88000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 00 : 19[8a000] -> 18[88000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 00 : 59[8a000] -> 58[88000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO Channel 00 : 49[1c000] -> 48[1a000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[88000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 00 : 27[8a000] -> 26[88000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[88000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO Channel 00 : 39[8a000] -> 38[88000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 00 : 47[8a000] -> 46[88000] via P2P/IPC +r7i4n1:77234:77296 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 00 : 35[8a000] -> 34[88000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 00 : 23[8a000] -> 22[88000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 00 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[88000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 00 : 55[8a000] -> 54[88000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 58[88000] -> 57[1c000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 01 : 59[8a000] -> 58[88000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 01 : 43[8a000] -> 42[88000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 01 : 
30[88000] -> 29[1c000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 01 : 19[8a000] -> 18[88000] via P2P/IPC +r8i5n2:15801:15833 [3] NCCL INFO Channel 01 : 31[8a000] -> 30[88000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 42[88000] -> 41[1c000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 26[88000] -> 25[1c000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 18[88000] -> 17[1c000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 10[88000] -> 9[1c000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 01 : 27[8a000] -> 26[88000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[88000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 01 : 46[88000] -> 45[1c000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 01 : 38[88000] -> 37[1c000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 34[88000] -> 33[1c000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO Channel 01 : 39[8a000] -> 38[88000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 01 : 14[88000] -> 13[1c000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 01 : 47[8a000] -> 46[88000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 01 : 35[8a000] -> 34[88000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[88000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 01 : 22[88000] -> 21[1c000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 01 : 23[8a000] -> 22[88000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 50[88000] -> 49[1c000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 01 : 6[88000] -> 5[1c000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 01 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[88000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 01 : 55[8a000] -> 54[88000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 01 : 54[88000] -> 53[1c000] via 
P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 00 : 60[1a000] -> 56[1a000] [send] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [receive] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [send] via NET/IB/3 +r14i7n7:81280:81319 [1] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] [send] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [send] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [send] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [receive] via NET/IB/3 +r8i5n2:15799:15848 [1] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r14i7n3:66127:66166 [1] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r14i7n0:25488:25537 [1] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r8i5n2:15800:15838 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r7i6n2:60565:60609 [1] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r14i7n4:5389:5423 [1] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [send] via 
NET/IB/3 +r8i1n7:45556:45605 [1] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r14i7n2:39446:39481 [1] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r8i2n1:41758:41803 [1] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r14i7n6:27449:27483 [1] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r8i3n1:64579:64613 [1] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r8i1n1:21964:22013 [1] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r14i7n5:42529:42568 [1] NCCL INFO Channel 01 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 38[88000] -> 42[88000] [receive] via NET/IB/2 +r14i7n3:66128:66176 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [send] via NET/IB/2 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [receive] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] [receive] via NET/IB/2 +r8i3n6:76353:76397 [1] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r14i7n1:29726:29770 [1] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [receive] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [receive] via NET/IB/2 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [receive] via NET/IB/2 +r8i1n7:45557:45600 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r14i7n1:29727:29760 [2] NCCL INFO Channel 01 : 38[88000] -> 42[88000] [send] via NET/IB/2 +r8i3n1:64580:64623 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [send] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [receive] via NET/IB/2 +r7i6n2:60566:60604 [2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [send] via NET/IB/2 +r14i7n5:42530:42573 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] 
[send] via NET/IB/2 +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [receive] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 56[1a000] -> 48[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 60[1a000] -> 56[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [receive] via NET/IB/3 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [receive] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [receive] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r14i7n1:29725:29775 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [receive] via NET/IB/3 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r14i7n5:42528:42578 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [receive] via NET/IB/2 +r8i3n1:64580:64623 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [receive] via NET/IB/2 +r14i7n1:29727:29760 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [receive] via NET/IB/2 +r7i6n2:60566:60604 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [receive] via NET/IB/2 +r8i3n1:64580:64623 [2] 
NCCL INFO Channel 01 : 22[88000] -> 23[8a000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 01 : 38[88000] -> 39[8a000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 01 : 6[88000] -> 7[8a000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [receive] via NET/IB/2 +r14i7n5:42530:42573 [2] NCCL INFO Channel 01 : 54[88000] -> 55[8a000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [send] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [send] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [send] via NET/IB/3 +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [send] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 56[1a000] -> 48[1a000] [send] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [send] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [send] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [send] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [send] via NET/IB/2 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [send] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [send] via NET/IB/2 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [send] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [send] via NET/IB/2 +r7i4n1:77231:77291 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 01 : 42[88000] -> 46[88000] [receive] via NET/IB/2 +r8i5n2:15800:15838 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] 
[receive] via NET/IB/2 +r8i1n7:45557:45600 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r14i7n3:66128:66176 [2] NCCL INFO Channel 01 : 46[88000] -> 47[8a000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 01 : 30[88000] -> 31[8a000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 01 : 14[88000] -> 15[8a000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r14i7n7:81282:81324 [3] NCCL INFO Channel 00 : 63[8a000] -> 62[88000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 01 : 62[88000] -> 61[1c000] via P2P/IPC +r14i7n7:81282:81324 [3] NCCL INFO Channel 01 : 63[8a000] -> 62[88000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 01 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [receive] via NET/IB/3 +r14i7n7:81281:81329 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r7i4n1:77232:77301 [1] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r7i4n1:77233:77306 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [receive] via NET/IB/2 +r8i1n7:45555:45595 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r14i7n7:81281:81329 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r14i7n7:81281:81329 [2] NCCL INFO Channel 01 : 62[88000] -> 63[8a000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 00 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n1:77231:77291 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [send] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [send] via NET/IB/3 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [receive] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 
01 : 50[88000] -> 58[88000] [receive] via NET/IB/2 +r7i4n1:77233:77306 [2] NCCL INFO Channel 01 : 2[88000] -> 3[8a000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [receive] via NET/IB/3 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 10[88000] -> 11[8a000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 58[88000] -> 59[8a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [receive] via NET/IB/3 +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [receive] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [receive] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 50[88000] -> 51[8a000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 26[88000] -> 27[8a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [receive] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [receive] via NET/IB/2 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 32[1a000] -> 48[1a000] [receive] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 42[88000] -> 43[8a000] via P2P/IPC +r8i3n6:76352:76387 [0] NCCL INFO Channel 00 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i4n1:77233:77306 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [send] via NET/IB/2 +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [send] via NET/IB/3 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [send] via NET/IB/2 +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [send] via NET/IB/3 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [send] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [receive] 
via NET/IB/2 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [send] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i4n1:77231:77291 [0] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] [receive] via NET/IB/3 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 34[88000] -> 35[8a000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [send] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [send] via NET/IB/2 +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [send] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [send] via NET/IB/2 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [receive] via NET/IB/2 +r14i7n7:81280:81319 [1] NCCL INFO Channel 01 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i4n1:77234:77296 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 18[88000] -> 19[8a000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 00 : 32[1a000] -> 48[1a000] [send] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 01 : 20[1a000] -> 23[8a000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [send] via NET/IB/3 +r8i1n1:21965:22008 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r8i5n2:15798:15843 [0] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 00 : 
8[1a000] -> 12[1a000] [send] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 01 : 28[1a000] -> 31[8a000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [send] via NET/IB/2 +r8i2n1:41758:41803 [1] NCCL INFO Channel 01 : 17[1c000] -> 16[1a000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 01 : 23[8a000] -> 20[1a000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 01 : 50[88000] -> 58[88000] [send] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r14i7n2:39445:39491 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [send] via NET/IB/3 +r8i5n2:15801:15833 [3] NCCL INFO Channel 01 : 31[8a000] -> 28[1a000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 01 : 25[1c000] -> 24[1a000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 01 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [send] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [send] via NET/IB/2 +r7i6n2:60564:60599 [0] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 01 : 4[1a000] -> 7[8a000] via P2P/IPC +r8i3n6:76352:76387 [0] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 01 : 52[1a000] -> 55[8a000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 
+r8i3n6:76352:76387 [0] NCCL INFO Channel 01 : 24[1a000] -> 27[8a000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 01 : 60[1a000] -> 63[8a000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 01 : 12[1a000] -> 15[8a000] via P2P/IPC +r14i7n1:29725:29775 [0] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 01 : 32[1a000] -> 35[8a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r8i2n1:41759:41798 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [send] via NET/IB/2 +r8i2n1:41757:41793 [0] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 01 : 36[1a000] -> 39[8a000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 01 : 8[1a000] -> 11[8a000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 01 : 16[1a000] -> 19[8a000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n5:42531:42567 [3] NCCL INFO Channel 01 : 55[8a000] -> 52[1a000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 01 : 7[8a000] -> 4[1a000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO Channel 01 : 49[1c000] -> 48[1a000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO Channel 01 : 21[1c000] -> 20[1a000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO Channel 01 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 01 : 44[1a000] -> 47[8a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r8i5n2:15799:15848 [1] NCCL INFO Channel 01 : 29[1c000] -> 28[1a000] via P2P/IPC +r14i7n7:81282:81324 [3] NCCL INFO Channel 01 : 63[8a000] -> 60[1a000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 01 : 27[8a000] -> 24[1a000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO 
Channel 01 : 56[1a000] -> 59[8a000] via P2P/IPC +r14i7n6:27449:27483 [1] NCCL INFO Channel 01 : 57[1c000] -> 56[1a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 01 : 40[1a000] -> 43[8a000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 01 : 15[8a000] -> 12[1a000] via P2P/IPC +r14i7n4:5388:5438 [0] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 +r14i7n1:29728:29769 [3] NCCL INFO Channel 01 : 39[8a000] -> 36[1a000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO Channel 01 : 5[1c000] -> 4[1a000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i1n1:21964:22013 [1] NCCL INFO Channel 01 : 9[1c000] -> 8[1a000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 01 : 35[8a000] -> 32[1a000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 01 : 19[8a000] -> 16[1a000] via P2P/IPC +r8i1n7:45556:45605 [1] NCCL INFO Channel 01 : 13[1c000] -> 12[1a000] via P2P/IPC +r14i7n4:5388:5438 [0] NCCL INFO Channel 01 : 48[1a000] -> 51[8a000] via P2P/IPC +r14i7n0:25488:25537 [1] NCCL INFO Channel 01 : 33[1c000] -> 32[1a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 01 : 20[1a000] -> 21[1c000] via P2P/IPC +r14i7n5:42529:42568 [1] NCCL INFO Channel 01 : 53[1c000] -> 52[1a000] via P2P/IPC +r8i5n2:15798:15843 [0] NCCL INFO Channel 01 : 28[1a000] -> 29[1c000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 01 : 47[8a000] -> 44[1a000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 01 : 11[8a000] -> 8[1a000] via P2P/IPC +r14i7n1:29726:29770 [1] NCCL INFO Channel 01 : 37[1c000] -> 36[1a000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 01 : 41[1c000] -> 40[1a000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 01 : 59[8a000] -> 56[1a000] via P2P/IPC +r8i3n6:76352:76387 [0] NCCL INFO Channel 01 : 24[1a000] -> 25[1c000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 01 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i6n2:60564:60599 [0] NCCL INFO Channel 01 : 
4[1a000] -> 5[1c000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO Channel 01 : 45[1c000] -> 44[1a000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 02 : 1[1c000] -> 2[88000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 01 : 32[1a000] -> 33[1c000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 01 : 43[8a000] -> 40[1a000] via P2P/IPC +r14i7n5:42528:42578 [0] NCCL INFO Channel 01 : 52[1a000] -> 53[1c000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 01 : 16[1a000] -> 17[1c000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 01 : 51[8a000] -> 48[1a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1c000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 02 : 22[88000] -> 23[8a000] via P2P/IPC +r14i7n1:29725:29775 [0] NCCL INFO Channel 01 : 36[1a000] -> 37[1c000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO Channel 02 : 29[1c000] -> 30[88000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO Channel 02 : 21[1c000] -> 22[88000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 02 : 30[88000] -> 31[8a000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO Channel 01 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 02 : 2[88000] -> 1[1c000] via P2P/IPC +r14i7n7:81280:81319 [1] NCCL INFO Channel 02 : 61[1c000] -> 62[88000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 01 : 44[1a000] -> 45[1c000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 02 : 25[1c000] -> 26[88000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 02 : 26[88000] -> 27[8a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 01 : 40[1a000] -> 41[1c000] via P2P/IPC +r8i1n7:45556:45605 [1] NCCL INFO Channel 02 : 13[1c000] -> 14[88000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 02 : 6[88000] -> 7[8a000] via P2P/IPC +r14i7n0:25488:25537 [1] NCCL INFO Channel 02 : 33[1c000] -> 34[88000] via 
P2P/IPC +r14i7n4:5388:5438 [0] NCCL INFO Channel 01 : 48[1a000] -> 49[1c000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 02 : 22[88000] -> 21[1c000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO Channel 02 : 5[1c000] -> 6[88000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 02 : 30[88000] -> 29[1c000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 02 : 14[88000] -> 15[8a000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 02 : 34[88000] -> 35[8a000] via P2P/IPC +r8i2n1:41758:41803 [1] NCCL INFO Channel 02 : 17[1c000] -> 18[88000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 02 : 54[88000] -> 55[8a000] via P2P/IPC +r14i7n5:42529:42568 [1] NCCL INFO Channel 02 : 53[1c000] -> 54[88000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 02 : 62[88000] -> 63[8a000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 02 : 18[88000] -> 19[8a000] via P2P/IPC +r8i1n1:21964:22013 [1] NCCL INFO Channel 02 : 9[1c000] -> 10[88000] via P2P/IPC +r14i7n1:29726:29770 [1] NCCL INFO Channel 02 : 37[1c000] -> 38[88000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 02 : 26[88000] -> 25[1c000] via P2P/IPC +r14i7n6:27449:27483 [1] NCCL INFO Channel 02 : 57[1c000] -> 58[88000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 02 : 38[88000] -> 39[8a000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 02 : 10[88000] -> 11[8a000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 02 : 58[88000] -> 59[8a000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 02 : 6[88000] -> 5[1c000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 02 : 14[88000] -> 13[1c000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 02 : 62[88000] -> 61[1c000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 02 : 34[88000] -> 33[1c000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO Channel 02 : 45[1c000] -> 46[88000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 02 : 18[88000] -> 17[1c000] via P2P/IPC 
+r14i7n5:42530:42573 [2] NCCL INFO Channel 02 : 54[88000] -> 53[1c000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 02 : 46[88000] -> 47[8a000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO Channel 02 : 49[1c000] -> 50[88000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 02 : 41[1c000] -> 42[88000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 02 : 38[88000] -> 37[1c000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 02 : 42[88000] -> 43[8a000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 02 : 10[88000] -> 9[1c000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 02 : 50[88000] -> 51[8a000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 02 : 58[88000] -> 57[1c000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 02 : 46[88000] -> 45[1c000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 02 : 50[88000] -> 49[1c000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 02 : 42[88000] -> 41[1c000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n1:77234:77296 [3] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [send] via NET/IB/2 +r7i4n1:77231:77291 [0] NCCL INFO Channel 02 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 02 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r8i5n2:15801:15833 [3] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 20[1a000] -> 21[1c000] via P2P/IPC +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 28[1a000] -> 29[1c000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 +r14i7n7:81282:81324 [3] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r8i3n6:76352:76387 [0] NCCL INFO 
Channel 02 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r8i3n6:76355:76396 [3] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r8i1n7:45558:45590 [3] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r8i3n6:76352:76387 [0] NCCL INFO Channel 02 : 24[1a000] -> 25[1c000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r7i6n2:60564:60599 [0] NCCL INFO Channel 02 : 4[1a000] -> 5[1c000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r8i2n1:41760:41808 [3] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 12[1a000] -> 13[1c000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n5:42531:42567 [3] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r8i1n1:21966:21998 [3] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r14i7n0:25487:25522 [0] NCCL INFO Channel 02 : 32[1a000] -> 33[1c000] via P2P/IPC +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 52[1a000] -> 53[1c000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r8i3n1:64579:64613 [1] NCCL INFO Channel 02 : 21[1c000] -> 20[1a000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO Channel 02 : 29[1c000] -> 28[1a000] via P2P/IPC 
+r8i2n1:41757:41793 [0] NCCL INFO Channel 02 : 16[1a000] -> 17[1c000] via P2P/IPC +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r14i7n1:29728:29769 [3] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r14i7n6:27448:27488 [0] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 02 : 8[1a000] -> 9[1c000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 36[1a000] -> 37[1c000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 02 : 23[8a000] -> 22[88000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO Channel 02 : 56[1a000] -> 57[1c000] via P2P/IPC +r14i7n7:81280:81319 [1] NCCL INFO Channel 02 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO Channel 02 : 5[1c000] -> 4[1a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 +r14i7n7:81282:81324 [3] NCCL INFO Channel 02 : 63[8a000] -> 62[88000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 02 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r8i1n7:45556:45605 [1] NCCL INFO Channel 02 : 13[1c000] -> 12[1a000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n2:39448:39486 [3] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r7i4n1:77234:77296 [3] NCCL INFO Channel 02 : 3[8a000] -> 2[88000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 02 : 25[1c000] -> 24[1a000] via P2P/IPC +r14i7n5:42529:42568 [1] NCCL INFO Channel 02 : 53[1c000] -> 52[1a000] via P2P/IPC +r14i7n2:39445:39491 [0] NCCL INFO Channel 02 : 40[1a000] -> 41[1c000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 44[1a000] -> 45[1c000] via P2P/IPC +r14i7n0:25488:25537 [1] NCCL INFO Channel 02 : 33[1c000] -> 32[1a000] via P2P/IPC +r14i7n4:5388:5438 [0] NCCL 
INFO Channel 02 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r8i5n2:15801:15833 [3] NCCL INFO Channel 02 : 31[8a000] -> 30[88000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 02 : 27[8a000] -> 26[88000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r14i7n1:29726:29770 [1] NCCL INFO Channel 02 : 37[1c000] -> 36[1a000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 02 : 15[8a000] -> 14[88000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 02 : 55[8a000] -> 54[88000] via P2P/IPC +r14i7n4:5388:5438 [0] NCCL INFO Channel 02 : 48[1a000] -> 49[1c000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 02 : 19[8a000] -> 18[88000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 02 : 7[8a000] -> 6[88000] via P2P/IPC +r8i2n1:41758:41803 [1] NCCL INFO Channel 02 : 17[1c000] -> 16[1a000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 02 : 11[8a000] -> 10[88000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 02 : 35[8a000] -> 34[88000] via P2P/IPC +r14i7n6:27449:27483 [1] NCCL INFO Channel 02 : 57[1c000] -> 56[1a000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 62[88000] -> 61[1c000] via P2P/IPC +r8i1n1:21964:22013 [1] NCCL INFO Channel 02 : 9[1c000] -> 8[1a000] via P2P/IPC +r14i7n7:81282:81324 [3] NCCL INFO Channel 03 : 63[8a000] -> 62[88000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 02 : 59[8a000] -> 58[88000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 22[88000] -> 21[1c000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 03 : 23[8a000] -> 22[88000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO Channel 02 : 39[8a000] -> 38[88000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:77234:77296 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO Channel 02 : 45[1c000] -> 44[1a000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 
02 : 41[1c000] -> 40[1a000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 54[88000] -> 53[1c000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 02 : 43[8a000] -> 42[88000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 30[88000] -> 29[1c000] via P2P/IPC +r8i5n2:15801:15833 [3] NCCL INFO Channel 03 : 31[8a000] -> 30[88000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 03 : 55[8a000] -> 54[88000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 02 : 47[8a000] -> 46[88000] via P2P/IPC +r8i3n6:76354:76402 [2] NCCL INFO Channel 03 : 26[88000] -> 25[1c000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO Channel 03 : 27[8a000] -> 26[88000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO Channel 02 : 49[1c000] -> 48[1a000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 14[88000] -> 13[1c000] via P2P/IPC +r8i2n1:41759:41798 [2] NCCL INFO Channel 03 : 18[88000] -> 17[1c000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 03 : 15[8a000] -> 14[88000] via P2P/IPC +r7i6n2:60566:60604 [2] NCCL INFO Channel 03 : 6[88000] -> 5[1c000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 02 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 03 : 7[8a000] -> 6[88000] via P2P/IPC +r8i2n1:41760:41808 [3] NCCL INFO Channel 03 : 19[8a000] -> 18[88000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 03 : 34[88000] -> 33[1c000] via P2P/IPC +r14i7n0:25490:25527 [3] NCCL INFO Channel 03 : 35[8a000] -> 34[88000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 03 : 11[8a000] -> 10[88000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 03 : 10[88000] -> 9[1c000] via P2P/IPC +r14i7n6:27450:27493 [2] NCCL INFO Channel 03 : 58[88000] -> 57[1c000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 03 : 59[8a000] -> 58[88000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO Channel 03 : 39[8a000] -> 38[88000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 38[88000] -> 37[1c000] 
via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO Channel 03 : 42[88000] -> 41[1c000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 03 : 43[8a000] -> 42[88000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 46[88000] -> 45[1c000] via P2P/IPC +r14i7n3:66129:66161 [3] NCCL INFO Channel 03 : 47[8a000] -> 46[88000] via P2P/IPC +r14i7n4:5390:5433 [2] NCCL INFO Channel 03 : 50[88000] -> 49[1c000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 03 : 51[8a000] -> 50[88000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [send] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [send] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [send] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i4n1:77232:77301 [1] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [send] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [receive] via NET/IB/3 +r8i5n2:15799:15848 [1] NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r7i4n1:77233:77306 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [send] via NET/IB/2 +r14i7n2:39445:39491 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i6n2:60565:60609 [1] NCCL INFO 
Channel 03 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [receive] via NET/IB/2 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [receive] via NET/IB/3 +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r14i7n7:81280:81319 [1] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [send] via NET/IB/3 +r8i3n1:64579:64613 [1] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [receive] via NET/IB/2 +r14i7n0:25488:25537 [1] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r8i1n7:45556:45605 [1] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r8i3n6:76353:76397 [1] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [send] via NET/IB/3 +r8i2n1:41758:41803 [1] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r8i3n6:76354:76402 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r7i6n2:60566:60604 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [receive] via NET/IB/2 +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r8i2n1:41759:41798 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] [send] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [send] via NET/IB/2 +r14i7n1:29726:29770 [1] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r8i1n1:21964:22013 [1] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r14i7n6:27449:27483 [1] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r14i7n5:42529:42568 [1] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r14i7n5:42530:42573 [2] NCCL 
INFO Channel 03 : 46[88000] -> 54[88000] [receive] via NET/IB/2 +r8i1n1:21965:22008 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [receive] via NET/IB/2 +r14i7n2:39446:39481 [1] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r14i7n3:66127:66166 [1] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r14i7n4:5389:5423 [1] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [send] via NET/IB/3 +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [receive] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] [send] via NET/IB/2 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [receive] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [receive] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [receive] via NET/IB/2 +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [receive] via NET/IB/2 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [receive] via NET/IB/3 +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [receive] via NET/IB/2 +r14i7n6:27448:27488 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [receive] via 
NET/IB/3 +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] [receive] via NET/IB/2 +r8i1n1:21963:22007 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [receive] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r14i7n2:39445:39491 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r8i3n6:76354:76402 [2] NCCL INFO Channel 03 : 26[88000] -> 27[8a000] via P2P/IPC +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [receive] via NET/IB/2 +r8i1n1:21965:22008 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] [receive] via NET/IB/2 +r14i7n6:27450:27493 [2] NCCL INFO Channel 03 : 58[88000] -> 59[8a000] via P2P/IPC +r8i1n1:21965:22008 [2] NCCL INFO Channel 03 : 10[88000] -> 11[8a000] via P2P/IPC +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [send] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [send] via NET/IB/3 +r14i7n2:39447:39496 [2] NCCL INFO Channel 03 : 42[88000] -> 43[8a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [send] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [send] via NET/IB/3 +r14i7n0:25487:25522 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [receive] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [receive] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [send] via NET/IB/3 +r7i4n1:77231:77291 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [receive] via 
NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [send] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [send] via NET/IB/3 +r14i7n4:5388:5438 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [receive] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [send] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [receive] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [receive] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [receive] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [receive] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [receive] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [receive] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [send] via NET/IB/3 +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [send] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [send] via NET/IB/3 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r7i6n2:60564:60599 [0] NCCL INFO Channel 03 : 4[1a000] -> 7[8a000] via P2P/IPC +r7i6n2:60567:60614 [3] NCCL INFO Channel 03 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i4n1:77232:77301 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC 
+r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [send] via NET/IB/2 +r8i5n2:15798:15843 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [send] via NET/IB/3 +r8i2n1:41759:41798 [2] NCCL INFO Channel 03 : 14[88000] -> 18[88000] [receive] via NET/IB/2 +r14i7n7:81279:81314 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [send] via NET/IB/3 +r8i1n7:45555:45595 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [send] via NET/IB/3 +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [send] via NET/IB/2 +r8i2n1:41759:41798 [2] NCCL INFO Channel 03 : 18[88000] -> 19[8a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [send] via NET/IB/3 +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 46[88000] -> 54[88000] [send] via NET/IB/2 +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [send] via NET/IB/2 +r14i7n5:42528:42578 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [send] via NET/IB/3 +r14i7n3:66126:66171 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [send] via NET/IB/3 +r14i7n1:29725:29775 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [send] via NET/IB/3 +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [send] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [receive] via NET/IB/2 +r14i7n0:25489:25532 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [receive] via NET/IB/2 +r14i7n4:5390:5433 [2] NCCL INFO Channel 03 : 50[88000] -> 51[8a000] via P2P/IPC +r14i7n0:25489:25532 [2] NCCL INFO Channel 03 : 34[88000] -> 35[8a000] via P2P/IPC +r7i4n1:77233:77306 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [receive] via NET/IB/2 +r8i3n6:76352:76387 [0] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r8i3n6:76352:76387 [0] NCCL INFO Channel 03 : 24[1a000] -> 27[8a000] via P2P/IPC +r14i7n6:27448:27488 [0] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r14i7n0:25487:25522 [0] 
NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 +r8i1n1:21963:22007 [0] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r14i7n6:27448:27488 [0] NCCL INFO Channel 03 : 56[1a000] -> 59[8a000] via P2P/IPC +r14i7n0:25487:25522 [0] NCCL INFO Channel 03 : 32[1a000] -> 35[8a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 03 : 8[1a000] -> 11[8a000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [receive] via NET/IB/2 +r7i4n1:77231:77291 [0] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [receive] via NET/IB/3 +r8i2n1:41757:41793 [0] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 +r8i3n6:76355:76396 [3] NCCL INFO Channel 03 : 27[8a000] -> 24[1a000] via P2P/IPC +r8i2n1:41757:41793 [0] NCCL INFO Channel 03 : 16[1a000] -> 19[8a000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 03 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 14[88000] -> 15[8a000] via P2P/IPC +r14i7n5:42529:42568 [1] NCCL INFO Channel 03 : 53[1c000] -> 52[1a000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO Channel 03 : 59[8a000] -> 56[1a000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [receive] via NET/IB/2 +r14i7n2:39445:39491 [0] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r7i6n2:60565:60609 [1] NCCL INFO Channel 03 : 5[1c000] -> 4[1a000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO Channel 03 : 21[1c000] -> 20[1a000] via P2P/IPC +r8i5n2:15798:15843 [0] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r14i7n0:25490:25527 [3] NCCL INFO Channel 03 : 35[8a000] -> 32[1a000] via P2P/IPC +r8i1n1:21966:21998 [3] NCCL INFO Channel 03 : 11[8a000] -> 8[1a000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO Channel 03 : 29[1c000] -> 28[1a000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r14i7n2:39445:39491 [0] NCCL INFO Channel 
03 : 40[1a000] -> 43[8a000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 22[88000] -> 23[8a000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 03 : 12[1a000] -> 15[8a000] via P2P/IPC +r7i4n1:77234:77296 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r8i5n2:15798:15843 [0] NCCL INFO Channel 03 : 28[1a000] -> 31[8a000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] [receive] via NET/IB/2 +r14i7n4:5388:5438 [0] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 +r8i2n1:41760:41808 [3] NCCL INFO Channel 03 : 19[8a000] -> 16[1a000] via P2P/IPC +r14i7n7:81280:81319 [1] NCCL INFO Channel 03 : 61[1c000] -> 60[1a000] via P2P/IPC +r8i3n1:64578:64628 [0] NCCL INFO Channel 03 : 20[1a000] -> 23[8a000] via P2P/IPC +r14i7n7:81279:81314 [0] NCCL INFO Channel 03 : 60[1a000] -> 63[8a000] via P2P/IPC +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [receive] via NET/IB/2 +r14i7n4:5388:5438 [0] NCCL INFO Channel 03 : 48[1a000] -> 51[8a000] via P2P/IPC +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 46[88000] -> 47[8a000] via P2P/IPC +r7i6n2:60564:60599 [0] NCCL INFO Channel 03 : 4[1a000] -> 5[1c000] via P2P/IPC +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [send] via NET/IB/2 +r14i7n3:66126:66171 [0] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r8i1n7:45556:45605 [1] NCCL INFO Channel 03 : 13[1c000] -> 12[1a000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO Channel 03 : 43[8a000] -> 40[1a000] via P2P/IPC +r14i7n5:42528:42578 [0] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [receive] via NET/IB/3 +r8i1n1:21964:22013 [1] NCCL INFO Channel 03 : 9[1c000] -> 8[1a000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO Channel 03 : 25[1c000] -> 24[1a000] 
via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 30[88000] -> 31[8a000] via P2P/IPC +r14i7n5:42528:42578 [0] NCCL INFO Channel 03 : 52[1a000] -> 55[8a000] via P2P/IPC +r14i7n3:66126:66171 [0] NCCL INFO Channel 03 : 44[1a000] -> 47[8a000] via P2P/IPC +r8i5n2:15801:15833 [3] NCCL INFO Channel 03 : 31[8a000] -> 28[1a000] via P2P/IPC +r14i7n1:29726:29770 [1] NCCL INFO Channel 03 : 37[1c000] -> 36[1a000] via P2P/IPC +r8i1n7:45558:45590 [3] NCCL INFO Channel 03 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i6n2:60565:60609 [1] NCCL INFO comm 0x154050001060 rank 5 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n2:60564:60599 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [send] via NET/IB/2 +r14i7n7:81282:81324 [3] NCCL INFO Channel 03 : 63[8a000] -> 60[1a000] via P2P/IPC +r14i7n6:27449:27483 [1] NCCL INFO Channel 03 : 57[1c000] -> 56[1a000] via P2P/IPC +r14i7n1:29725:29775 [0] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r8i2n1:41758:41803 [1] NCCL INFO Channel 03 : 17[1c000] -> 16[1a000] via P2P/IPC +r8i3n1:64581:64618 [3] NCCL INFO Channel 03 : 23[8a000] -> 20[1a000] via P2P/IPC +r7i6n2:60564:60599 [0] NCCL INFO comm 0x14ed60001060 rank 4 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n1:29725:29775 [0] NCCL INFO Channel 03 : 36[1a000] -> 39[8a000] via P2P/IPC +r14i7n4:5391:5428 [3] NCCL INFO Channel 03 : 51[8a000] -> 48[1a000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r14i7n5:42531:42567 [3] NCCL INFO Channel 03 : 55[8a000] -> 52[1a000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO Channel 03 : 8[1a000] -> 9[1c000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO Channel 03 : 41[1c000] -> 40[1a000] via P2P/IPC +r8i3n6:76352:76387 [0] NCCL INFO Channel 03 : 24[1a000] -> 25[1c000] via P2P/IPC 
+r8i1n1:21966:21998 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n3:66127:66166 [1] NCCL INFO Channel 03 : 45[1c000] -> 44[1a000] via P2P/IPC +r8i3n6:76355:76396 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n1:77232:77301 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n4:5389:5423 [1] NCCL INFO Channel 03 : 49[1c000] -> 48[1a000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [receive] via NET/IB/2 +r8i1n1:21966:21998 [3] NCCL INFO comm 0x153eb0001060 rank 11 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n1:77232:77301 [1] NCCL INFO comm 0x147b78001060 rank 1 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i1n1:21964:22013 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i3n6:76355:76396 [3] NCCL INFO comm 0x147370001060 rank 27 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r8i2n1:41757:41793 [0] NCCL INFO Channel 03 : 16[1a000] -> 17[1c000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO Channel 03 : 39[8a000] -> 36[1a000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n6:27448:27488 [0] NCCL INFO Channel 03 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i4n1:77231:77291 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n1:21964:22013 [1] NCCL INFO comm 0x153980001060 rank 9 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n3:66129:66161 [3] NCCL INFO Channel 03 : 47[8a000] -> 44[1a000] via P2P/IPC +r8i3n6:76353:76397 [1] NCCL INFO comm 0x14acc8001060 rank 25 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n0:25488:25537 [1] NCCL INFO Channel 03 : 33[1c000] -> 32[1a000] via P2P/IPC +r14i7n6:27451:27498 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n1:21963:22007 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i2n1:41760:41808 [3] NCCL INFO 4 coll channels, 
4 p2p channels, 1 p2p channels per peer +r7i4n1:77231:77291 [0] NCCL INFO comm 0x15100c001060 rank 0 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i3n6:76352:76387 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15798:15843 [0] NCCL INFO Channel 03 : 28[1a000] -> 29[1c000] via P2P/IPC +r8i1n7:45555:45595 [0] NCCL INFO Channel 03 : 12[1a000] -> 13[1c000] via P2P/IPC +r8i1n1:21963:22007 [0] NCCL INFO comm 0x14e618001060 rank 8 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i2n1:41760:41808 [3] NCCL INFO comm 0x1498c0001060 rank 19 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n1:77231:77231 [0] NCCL INFO Launch mode Parallel +r14i7n6:27451:27498 [3] NCCL INFO comm 0x14c788001060 rank 59 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 62[88000] -> 63[8a000] via P2P/IPC +r8i3n6:76352:76387 [0] NCCL INFO comm 0x14f3a0001060 rank 24 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i2n1:41758:41803 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n6:27449:27483 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15801:15833 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n6:27449:27483 [1] NCCL INFO comm 0x146ec0001060 rank 57 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i3n1:64578:64628 [0] NCCL INFO Channel 03 : 20[1a000] -> 21[1c000] via P2P/IPC +r8i2n1:41758:41803 [1] NCCL INFO comm 0x14a560001060 rank 17 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i5n2:15801:15833 [3] NCCL INFO comm 0x14c280001060 rank 31 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n2:39445:39491 [0] NCCL INFO Channel 03 : 40[1a000] -> 41[1c000] via P2P/IPC +r8i5n2:15799:15848 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n6:27448:27488 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i2n1:41757:41793 [0] NCCL INFO 4 coll channels, 4 p2p 
channels, 1 p2p channels per peer +r8i1n7:45558:45590 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i3n1:64581:64618 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n7:81279:81314 [0] NCCL INFO Channel 03 : 60[1a000] -> 61[1c000] via P2P/IPC +r14i7n2:39448:39486 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15799:15848 [1] NCCL INFO comm 0x153c10001060 rank 29 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n6:27448:27488 [0] NCCL INFO comm 0x146258001060 rank 56 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i2n1:41757:41793 [0] NCCL INFO comm 0x1546b0001060 rank 16 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i1n7:45558:45590 [3] NCCL INFO comm 0x151f90001060 rank 15 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r8i5n2:15798:15843 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i3n1:64581:64618 [3] NCCL INFO comm 0x14f700001060 rank 23 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n7:81282:81324 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n4:5388:5438 [0] NCCL INFO Channel 03 : 48[1a000] -> 49[1c000] via P2P/IPC +r8i1n7:45556:45605 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n2:39448:39486 [3] NCCL INFO comm 0x151e08001060 rank 43 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n2:39446:39481 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15798:15843 [0] NCCL INFO comm 0x1480b8001060 rank 28 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n7:81282:81324 [3] NCCL INFO comm 0x148eb0001060 rank 63 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n5:42528:42578 [0] NCCL INFO Channel 03 : 52[1a000] -> 53[1c000] via P2P/IPC +r8i3n1:64579:64613 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n4:5391:5428 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer 
+r14i7n7:81280:81319 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25487:25522 [0] NCCL INFO Channel 03 : 32[1a000] -> 33[1c000] via P2P/IPC +r14i7n2:39446:39481 [1] NCCL INFO comm 0x150550001060 rank 41 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i1n7:45555:45595 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r8i3n1:64579:64613 [1] NCCL INFO comm 0x151e40001060 rank 21 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n7:81280:81319 [1] NCCL INFO comm 0x149ab8001060 rank 61 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n3:66126:66171 [0] NCCL INFO Channel 03 : 44[1a000] -> 45[1c000] via P2P/IPC +r14i7n4:5389:5423 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n2:39445:39491 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25490:25527 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n7:45555:45595 [0] NCCL INFO comm 0x14bf70001060 rank 12 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n5:42529:42568 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n7:81279:81314 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n7:45556:45605 [1] NCCL INFO comm 0x14a380001060 rank 13 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n2:39445:39491 [0] NCCL INFO comm 0x14ead0001060 rank 40 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n4:5389:5423 [1] NCCL INFO comm 0x150500001060 rank 49 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n3:66129:66161 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25490:25527 [3] NCCL INFO comm 0x1481f0001060 rank 35 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n7:81279:81314 [0] NCCL INFO comm 0x148e80001060 rank 60 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE 
+r14i7n5:42529:42568 [1] NCCL INFO comm 0x147128001060 rank 53 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n4:5388:5438 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n3:66129:66161 [3] NCCL INFO comm 0x1507a0001060 rank 47 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n4:5391:5428 [3] NCCL INFO comm 0x147848001060 rank 51 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n0:25488:25537 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n5:42528:42578 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n4:5388:5438 [0] NCCL INFO comm 0x146c80001060 rank 48 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n1:29725:29775 [0] NCCL INFO Channel 03 : 36[1a000] -> 37[1c000] via P2P/IPC +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [send] via NET/IB/2 +r14i7n0:25488:25537 [1] NCCL INFO comm 0x153088001060 rank 33 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r14i7n5:42528:42578 [0] NCCL INFO comm 0x153370001060 rank 52 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n2:60566:60604 [2] NCCL INFO Channel 03 : 6[88000] -> 7[8a000] via P2P/IPC +r14i7n3:66127:66166 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29726:29770 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n3:66127:66166 [1] NCCL INFO comm 0x152550001060 rank 45 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n2:60567:60614 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25487:25522 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29726:29770 [1] NCCL INFO comm 0x149590001060 rank 37 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n2:60567:60614 [3] NCCL INFO comm 0x150d70001060 rank 7 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n1:29725:29775 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i3n1:64578:64628 
[0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25487:25522 [0] NCCL INFO comm 0x146390001060 rank 32 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n1:29725:29775 [0] NCCL INFO comm 0x1533d0001060 rank 36 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i3n1:64578:64628 [0] NCCL INFO comm 0x154728001060 rank 20 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [send] via NET/IB/2 +r14i7n3:66126:66171 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n3:66126:66171 [0] NCCL INFO comm 0x1484a0001060 rank 44 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 38[88000] -> 54[88000] [receive] via NET/IB/2 +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 54[88000] -> 55[8a000] via P2P/IPC +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r14i7n5:42531:42567 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [receive] via NET/IB/2 +r14i7n5:42531:42567 [3] NCCL INFO comm 0x14fa50001060 rank 55 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r8i1n7:45557:45600 [2] NCCL INFO Channel 03 : 14[88000] -> 18[88000] [send] via NET/IB/2 +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 38[88000] -> 39[8a000] via P2P/IPC +r14i7n1:29728:29769 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29728:29769 [3] NCCL INFO comm 0x154f08001060 rank 39 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n2:60566:60604 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [send] via NET/IB/2 +r8i3n1:64580:64623 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [send] via NET/IB/2 +r8i1n1:21965:22008 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer 
+r8i1n1:21965:22008 [2] NCCL INFO comm 0x1455a0001060 rank 10 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n3:66128:66176 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [send] via NET/IB/2 +r7i6n2:60566:60604 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n2:60566:60604 [2] NCCL INFO comm 0x152f60001060 rank 6 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r8i2n1:41759:41798 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i2n1:41759:41798 [2] NCCL INFO comm 0x14dd50001060 rank 18 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r8i1n7:45557:45600 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i1n7:45557:45600 [2] NCCL INFO comm 0x152190001060 rank 14 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n1:77233:77306 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r14i7n2:39447:39496 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n2:39447:39496 [2] NCCL INFO comm 0x146b98001060 rank 42 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n1:77234:77296 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15800:15838 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [send] via NET/IB/2 +r7i4n1:77234:77296 [3] NCCL INFO comm 0x14d410001060 rank 3 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r14i7n4:5390:5433 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n4:5390:5433 [2] NCCL INFO comm 0x14aca0001060 rank 50 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n7:81281:81329 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [send] via NET/IB/2 +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] [send] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [send] via NET/IB/2 +r8i3n6:76354:76402 [2] NCCL INFO comm 0x14b9c8001060 rank 26 
nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n6:27450:27493 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n0:25489:25532 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n6:27450:27493 [2] NCCL INFO comm 0x14f8c8001060 rank 58 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n0:25489:25532 [2] NCCL INFO comm 0x1541b0001060 rank 34 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r8i5n2:15800:15838 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i5n2:15800:15838 [2] NCCL INFO comm 0x14c4f8001060 rank 30 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n1:77233:77306 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n1:77233:77306 [2] NCCL INFO comm 0x153d90001060 rank 2 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n5:42530:42573 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [send] via NET/IB/2 +r14i7n1:29727:29760 [2] NCCL INFO Channel 03 : 38[88000] -> 54[88000] [send] via NET/IB/2 +r14i7n3:66128:66176 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n3:66128:66176 [2] NCCL INFO comm 0x149250001060 rank 46 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r8i3n1:64580:64623 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n7:81281:81329 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i3n1:64580:64623 [2] NCCL INFO comm 0x14b8d8001060 rank 22 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n7:81281:81329 [2] NCCL INFO comm 0x145540001060 rank 62 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n5:42530:42573 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n5:42530:42573 [2] NCCL INFO comm 0x14a310001060 rank 54 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r14i7n1:29727:29760 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r14i7n1:29727:29760 [2] NCCL INFO comm 0x147868001060 rank 
38 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +ignore me 32 +ignore me 32 +29: + duration: 1.4238 sec + algo throughput: 44949312238.5459 bps, 44.9493 Gbps + busbw: 44.2470 Gbps +30: + duration: 2.7400 sec + algo throughput: 23357761551.2913 bps, 23.3578 Gbps + busbw: 22.9928 Gbps +ignore me 32 +31: + duration: 2.7475 sec + algo throughput: 23293882339.6892 bps, 23.2939 Gbps + busbw: 22.9299 Gbps +ignore me 32 +28: + duration: 2.7050 sec + algo throughput: 23660064022.5825 bps, 23.6601 Gbps + busbw: 23.2904 Gbps +ignore me 32 +ignore me 32 +25: + duration: 2.7024 sec + algo throughput: 23682646586.1801 bps, 23.6826 Gbps + busbw: 23.3126 Gbps +32: + duration: 2.7372 sec + algo throughput: 23381556289.9381 bps, 23.3816 Gbps + busbw: 23.0162 Gbps +ignore me 32 +35: + duration: 2.7250 sec + algo throughput: 23486226145.8290 bps, 23.4862 Gbps + busbw: 23.1193 Gbps +ignore me 32 +26: + duration: 2.6719 sec + algo throughput: 23953319066.3962 bps, 23.9533 Gbps + busbw: 23.5790 Gbps +ignore me 32 +ignore me 32 +34: + duration: 2.6906 sec + algo throughput: 23786574860.4958 bps, 23.7866 Gbps + busbw: 23.4149 Gbps +27: + duration: 2.7036 sec + algo throughput: 23672329074.9777 bps, 23.6723 Gbps + busbw: 23.3024 Gbps +ignore me 32 +ignore me 32 +33: + duration: 2.6729 sec +24: + duration: 2.7194 sec + algo throughput: 23943929494.9669 bps, 23.9439 Gbps + busbw: 23.5698 Gbps + algo throughput: 23534932876.5290 bps, 23.5349 Gbps + busbw: 23.1672 Gbps +ignore me 32 +ignore me 32 +21: + duration: 2.7199 sec + algo throughput: 23530449802.3782 bps, 23.5304 Gbps + busbw: 23.1628 Gbps +22: + duration: 2.6953 sec + algo throughput: 23745283505.8812 bps, 23.7453 Gbps + busbw: 23.3743 Gbps +ignore me 32 +ignore me 32 +23: +36: + duration: 2.6422 sec + duration: 2.7147 sec + algo throughput: 23575679911.7559 bps, 23.5757 Gbps + algo throughput: 24221937860.2714 bps, 24.2219 Gbps + busbw: 23.2073 Gbps + busbw: 23.8435 Gbps +ignore me 32 +ignore me 32 +39: + duration: 2.6866 sec + 
algo throughput: 23821771714.9784 bps, 23.8218 Gbps + busbw: 23.4496 Gbps +20: + duration: 2.6915 sec + algo throughput: 23778723983.1553 bps, 23.7787 Gbps + busbw: 23.4072 Gbps +ignore me 32 +17: + duration: 2.7000 sec + algo throughput: 23703530808.9786 bps, 23.7035 Gbps + busbw: 23.3332 Gbps +ignore me 32 +38: + duration: 2.7053 sec + algo throughput: 23657379765.9858 bps, 23.6574 Gbps + busbw: 23.2877 Gbps +ignore me 32 +18: + duration: 2.7087 sec + algo throughput: 23627826495.1307 bps, 23.6278 Gbps + busbw: 23.2586 Gbps +ignore me 32 +19: + duration: 2.6893 sec + algo throughput: 23798098995.1087 bps, 23.7981 Gbps + busbw: 23.4263 Gbps +ignore me 32 +16: + duration: 2.7193 sec + algo throughput: 23535449127.8586 bps, 23.5354 Gbps + busbw: 23.1677 Gbps +ignore me 32 +37: + duration: 2.6878 sec + algo throughput: 23810924739.9213 bps, 23.8109 Gbps + busbw: 23.4389 Gbps +ignore me 32 +13: + duration: 2.6579 sec + algo throughput: 24079589945.4143 bps, 24.0796 Gbps + busbw: 23.7033 Gbps +ignore me 32 +14: + duration: 2.6687 sec + algo throughput: 23981976345.2383 bps, 23.9820 Gbps + busbw: 23.6073 Gbps +ignore me 32 +ignore me 32 +43: + duration: 2.6799 sec + algo throughput: 23881529023.8278 bps, 23.8815 Gbps + busbw: 23.5084 Gbps +40: + duration: 2.6675 sec + algo throughput: 23992311591.2424 bps, 23.9923 Gbps + busbw: 23.6174 Gbps +ignore me 32 +15: + duration: 2.7035 sec +ignore me 32 + algo throughput: 23672833390.0361 bps, 23.6728 Gbps + busbw: 23.3029 Gbps +42: + duration: 2.6612 sec + algo throughput: 24048980012.2614 bps, 24.0490 Gbps + busbw: 23.6732 Gbps +ignore me 32 +12: + duration: 2.6873 sec + algo throughput: 23815701948.8174 bps, 23.8157 Gbps + busbw: 23.4436 Gbps +ignore me 32 +41: + duration: 2.7323 sec + algo throughput: 23423078349.8974 bps, 23.4231 Gbps + busbw: 23.0571 Gbps +ignore me 32 +9: + duration: 2.7008 sec + algo throughput: 23697006649.9667 bps, 23.6970 Gbps + busbw: 23.3267 Gbps +ignore me 32 +10: + duration: 2.7208 sec + algo 
throughput: 23522906453.0563 bps, 23.5229 Gbps + busbw: 23.1554 Gbps +ignore me 32 +ignore me 32 +44: + duration: 2.7216 sec + algo throughput: 23515800543.0813 bps, 23.5158 Gbps + busbw: 23.1484 Gbps +ignore me 32 +11: + duration: 2.7418 sec + algo throughput: 23342156648.8888 bps, 23.3422 Gbps + busbw: 22.9774 Gbps +8: + duration: 2.7226 sec + algo throughput: 23507307437.4491 bps, 23.5073 Gbps + busbw: 23.1400 Gbps +ignore me 32 +47: + duration: 2.7480 sec + algo throughput: 23289700965.6135 bps, 23.2897 Gbps + busbw: 22.9258 Gbps +ignore me 32 +5: + duration: 2.6909 sec + algo throughput: 23783868684.3031 bps, 23.7839 Gbps + busbw: 23.4122 Gbps +ignore me 32 +46: + duration: 2.6782 sec + algo throughput: 23896435164.6071 bps, 23.8964 Gbps + busbw: 23.5231 Gbps +ignore me 32 +6: + duration: 2.6949 sec + algo throughput: 23748904138.5712 bps, 23.7489 Gbps + busbw: 23.3778 Gbps +ignore me 32 +ignore me 32 +45: + duration: 2.7273 sec + algo throughput: 23466730368.0785 bps, 23.4667 Gbps + busbw: 23.1001 Gbps +7: + duration: 2.6819 sec + algo throughput: 23863603392.8412 bps, 23.8636 Gbps + busbw: 23.4907 Gbps +ignore me 32 +4: + duration: 2.7313 sec + algo throughput: 23431695065.9442 bps, 23.4317 Gbps + busbw: 23.0656 Gbps +ignore me 32 +1: + duration: 2.4759 sec + algo throughput: 25848983853.4357 bps, 25.8490 Gbps + busbw: 25.4451 Gbps +ignore me 32 +ignore me 32 +2: + duration: 2.4740 sec + algo throughput: 25869549395.3480 bps, 25.8695 Gbps + busbw: 25.4653 Gbps +51: + duration: 2.7141 sec + algo throughput: 23580495129.2975 bps, 23.5805 Gbps + busbw: 23.2120 Gbps +ignore me 32 +3: + duration: 2.7923 sec + algo throughput: 22920054190.3781 bps, 22.9201 Gbps + busbw: 22.5619 Gbps +ignore me 32 +ignore me 32 +50: + duration: 2.7000 sec + algo throughput: 23703671307.9286 bps, 23.7037 Gbps + busbw: 23.3333 Gbps +ignore me 32 +ignore me 32 +48: + duration: 2.6783 sec + algo throughput: 23895395451.9701 bps, 23.8954 Gbps + busbw: 23.5220 Gbps +0: + duration: 2.7667 
sec +ignore me 32 + algo throughput: 23132333701.1816 bps, 23.1323 Gbps + busbw: 22.7709 Gbps +49: + duration: 2.7238 sec + algo throughput: 23496664005.5518 bps, 23.4967 Gbps + busbw: 23.1295 Gbps +61: + duration: 2.7459 sec + algo throughput: 23307236982.8999 bps, 23.3072 Gbps + busbw: 22.9431 Gbps +ignore me 32 +ignore me 32 +ignore me 32 +63: + duration: 2.7194 sec + algo throughput: 23534413589.1336 bps, 23.5344 Gbps + busbw: 23.1667 Gbps +ignore me 32 +ignore me 32 +60: + duration: 2.7590 sec + algo throughput: 23196861557.9333 bps, 23.1969 Gbps +52: + duration: 2.6780 sec + busbw: 22.8344 Gbps + algo throughput: 23898383117.6090 bps, 23.8984 Gbps +62: + duration: 2.7047 sec + busbw: 23.5250 Gbps + algo throughput: 23662466551.6355 bps, 23.6625 Gbps + busbw: 23.2927 Gbps +ignore me 32 +55: + duration: 2.7123 sec + algo throughput: 23596530186.7741 bps, 23.5965 Gbps + busbw: 23.2278 Gbps +57: + duration: 2.7261 sec + algo throughput: 23477162203.4748 bps, 23.4772 Gbps + busbw: 23.1103 Gbps +ignore me 32 +ignore me 32 +58: + duration: 2.7148 sec + algo throughput: 23574579438.3545 bps, 23.5746 Gbps + busbw: 23.2062 Gbps +54: + duration: 2.7037 sec + algo throughput: 23671255490.3380 bps, 23.6713 Gbps + busbw: 23.3014 Gbps +ignore me 32 +ignore me 32 +59: + duration: 2.7090 sec + algo throughput: 23624545419.9078 bps, 23.6245 Gbps + busbw: 23.2554 Gbps +53: + duration: 2.7116 sec + algo throughput: 23602103526.4298 bps, 23.6021 Gbps + busbw: 23.2333 Gbps +ignore me 32 +56: + duration: 2.7226 sec + algo throughput: 23506550398.1415 bps, 23.5066 Gbps + busbw: 23.1393 Gbps +ignore me 2082 +29: + duration: 1.0835 sec + algo throughput: 59066859484.2421 bps, 59.0669 Gbps + busbw: 58.1439 Gbps +ignore me 2082 +30: + duration: 1.0837 sec + algo throughput: 59058841716.5432 bps, 59.0588 Gbps + busbw: 58.1360 Gbps +ignore me 2082 +31: + duration: 1.0835 sec + algo throughput: 59065131143.3213 bps, 59.0651 Gbps + busbw: 58.1422 Gbps +ignore me 2082 +28: + duration: 1.0835 
sec + algo throughput: 59066126804.8039 bps, 59.0661 Gbps + busbw: 58.1432 Gbps +ignore me 2082 +ignore me 2082 +32: + duration: 1.0834 sec +25: + duration: 1.0835 sec + algo throughput: 59070713566.1664 bps, 59.0707 Gbps + busbw: 58.1477 Gbps + algo throughput: 59067100566.9895 bps, 59.0671 Gbps + busbw: 58.1442 Gbps +ignore me 2082 +35: + duration: 1.0835 sec + algo throughput: 59066808104.9576 bps, 59.0668 Gbps + busbw: 58.1439 Gbps +ignore me 2082 +26: + duration: 1.0836 sec + algo throughput: 59063002859.3982 bps, 59.0630 Gbps + busbw: 58.1401 Gbps +ignore me 2082 +34: + duration: 1.0835 sec + algo throughput: 59068015617.9326 bps, 59.0680 Gbps + busbw: 58.1451 Gbps +ignore me 2082 +ignore me 2082 +27: + duration: 1.0836 sec + algo throughput: 59063539002.5322 bps, 59.0635 Gbps + busbw: 58.1407 Gbps +33: + duration: 1.0835 sec + algo throughput: 59068859360.3372 bps, 59.0689 Gbps + busbw: 58.1459 Gbps +ignore me 2082 +24: + duration: 1.0836 sec + algo throughput: 59063686930.3064 bps, 59.0637 Gbps + busbw: 58.1408 Gbps +ignore me 2082 +21: + duration: 1.0831 sec + algo throughput: 59087830454.9430 bps, 59.0878 Gbps + busbw: 58.1646 Gbps +ignore me 2082 +22: + duration: 1.0835 sec + algo throughput: 59069311273.5637 bps, 59.0693 Gbps + busbw: 58.1464 Gbps +ignore me 2082 +ignore me 2082 +23: + duration: 1.0835 sec +36: + duration: 1.0834 sec + algo throughput: 59070147766.6642 bps, 59.0701 Gbps + algo throughput: 59071704972.7534 bps, 59.0717 Gbps + busbw: 58.1472 Gbps + busbw: 58.1487 Gbps +ignore me 2082 +ignore me 2082 +39: + duration: 1.0835 sec + algo throughput: 59070455646.9749 bps, 59.0705 Gbps + busbw: 58.1475 Gbps +20: + duration: 1.0835 sec + algo throughput: 59068400396.6142 bps, 59.0684 Gbps + busbw: 58.1455 Gbps +ignore me 2082 +17: + duration: 1.0835 sec + algo throughput: 59069228181.5338 bps, 59.0692 Gbps + busbw: 58.1463 Gbps +ignore me 2082 +38: + duration: 1.0835 sec + algo throughput: 59067292480.1127 bps, 59.0673 Gbps + busbw: 58.1444 Gbps 
+ignore me 2082 +18: + duration: 1.0835 sec + algo throughput: 59066918834.5348 bps, 59.0669 Gbps + busbw: 58.1440 Gbps +ignore me 2082 +ignore me 2082 +37: + duration: 1.0830 sec +19: + duration: 1.0835 sec + algo throughput: 59097268924.3910 bps, 59.0973 Gbps + busbw: 58.1739 Gbps + algo throughput: 59065899437.4802 bps, 59.0659 Gbps + busbw: 58.1430 Gbps +ignore me 2082 +16: + duration: 1.0835 sec + algo throughput: 59067100973.1535 bps, 59.0671 Gbps + busbw: 58.1442 Gbps +ignore me 2082 +13: + duration: 1.0834 sec + algo throughput: 59070788487.3026 bps, 59.0708 Gbps + busbw: 58.1478 Gbps +ignore me 2082 +14: + duration: 1.0835 sec + algo throughput: 59069005258.4249 bps, 59.0690 Gbps + busbw: 58.1461 Gbps +ignore me 2082 +ignore me 2082 +40: + duration: 1.0833 sec + algo throughput: 59078456811.2615 bps, 59.0785 Gbps + busbw: 58.1554 Gbps +43: + duration: 1.0834 sec + algo throughput: 59073657924.5029 bps, 59.0737 Gbps + busbw: 58.1506 Gbps +ignore me 2082 +15: + duration: 1.0834 sec + algo throughput: 59071604406.2674 bps, 59.0716 Gbps + busbw: 58.1486 Gbps +ignore me 2082 +42: + duration: 1.0835 sec + algo throughput: 59068664162.1612 bps, 59.0687 Gbps + busbw: 58.1457 Gbps +ignore me 2082 +12: + duration: 1.0835 sec + algo throughput: 59069243769.2050 bps, 59.0692 Gbps + busbw: 58.1463 Gbps +ignore me 2082 +41: + duration: 1.0835 sec + algo throughput: 59069901175.3071 bps, 59.0699 Gbps + busbw: 58.1469 Gbps +ignore me 2082 +9: + duration: 1.0834 sec + algo throughput: 59070958107.8323 bps, 59.0710 Gbps + busbw: 58.1480 Gbps +ignore me 2082 +10: + duration: 1.0835 sec + algo throughput: 59067763154.8657 bps, 59.0678 Gbps + busbw: 58.1448 Gbps +ignore me 2082 +11: + duration: 1.0831 sec + algo throughput: 59091223992.2625 bps, 59.0912 Gbps + busbw: 58.1679 Gbps +ignore me 2082 +44: + duration: 1.0836 sec + algo throughput: 59063835417.2353 bps, 59.0638 Gbps + busbw: 58.1410 Gbps +ignore me 2082 +8: + duration: 1.0836 sec + algo throughput: 59063976543.9658 
bps, 59.0640 Gbps + busbw: 58.1411 Gbps +ignore me 2082 +47: + duration: 1.0835 sec + algo throughput: 59066916829.1123 bps, 59.0669 Gbps + busbw: 58.1440 Gbps +ignore me 2082 +5: + duration: 1.0835 sec + algo throughput: 59067000092.3359 bps, 59.0670 Gbps + busbw: 58.1441 Gbps +ignore me 2082 +ignore me 2082 +46: + duration: 1.0836 sec + algo throughput: 59061574740.1701 bps, 59.0616 Gbps + busbw: 58.1387 Gbps +6: + duration: 1.0835 sec + algo throughput: 59067310681.4618 bps, 59.0673 Gbps + busbw: 58.1444 Gbps +ignore me 2082 +ignore me 2082 +7: + duration: 1.0835 sec + algo throughput: 59069864388.6128 bps, 59.0699 Gbps + busbw: 58.1469 Gbps +45: + duration: 1.0836 sec + algo throughput: 59060554131.6867 bps, 59.0606 Gbps + busbw: 58.1377 Gbps +ignore me 2082 +4: + duration: 1.0835 sec + algo throughput: 59068502018.4223 bps, 59.0685 Gbps + busbw: 58.1456 Gbps +ignore me 2082 +1: + duration: 1.0835 sec + algo throughput: 59068737427.9660 bps, 59.0687 Gbps + busbw: 58.1458 Gbps +ignore me 2082 +ignore me 2082 +48: + duration: 1.0830 sec + algo throughput: 59097017685.0354 bps, 59.0970 Gbps + busbw: 58.1736 Gbps +2: + duration: 1.0835 sec + algo throughput: 59069347475.6640 bps, 59.0693 Gbps + busbw: 58.1464 Gbps +ignore me 2082 +51: + duration: 1.0837 sec + algo throughput: 59056277492.4153 bps, 59.0563 Gbps + busbw: 58.1335 Gbps +ignore me 2082 +3: + duration: 1.0837 sec + algo throughput: 59056992519.1950 bps, 59.0570 Gbps + busbw: 58.1342 Gbps +ignore me 2082 +0: + duration: 1.0839 sec + algo throughput: 59046172437.9481 bps, 59.0462 Gbps + busbw: 58.1236 Gbps +ignore me 2082 +50: + duration: 1.0844 sec + algo throughput: 59021354231.1332 bps, 59.0214 Gbps +ignore me 2082 + busbw: 58.0991 Gbps +61: + duration: 1.0842 sec + algo throughput: 59031130336.6949 bps, 59.0311 Gbps + busbw: 58.1088 Gbps +ignore me 2082 +62: + duration: 1.0840 sec + algo throughput: 59039602514.4646 bps, 59.0396 Gbps + busbw: 58.1171 Gbps +ignore me 2082 +49: + duration: 1.0850 sec + 
algo throughput: 58986382453.8746 bps, 58.9864 Gbps + busbw: 58.0647 Gbps +ignore me 2082 +63: + duration: 1.0847 sec + algo throughput: 59004654990.5545 bps, 59.0047 Gbps + busbw: 58.0827 Gbps +ignore me 2082 +60: + duration: 1.0849 sec + algo throughput: 58992763717.5499 bps, 58.9928 Gbps + busbw: 58.0710 Gbps +ignore me 2082 +ignore me 2082 +55: + duration: 1.0852 sec +57: + duration: 1.0851 sec + algo throughput: 58975886240.9931 bps, 58.9759 Gbps + busbw: 58.0544 Gbps +ignore me 2082 + algo throughput: 58978312463.6023 bps, 58.9783 Gbps + busbw: 58.0568 Gbps +52: + duration: 1.0854 sec + algo throughput: 58966362587.4101 bps, 58.9664 Gbps + busbw: 58.0450 Gbps +ignore me 2082 +ignore me 2082 +58: + duration: 1.0854 sec + algo throughput: 58964692791.5706 bps, 58.9647 Gbps + busbw: 58.0434 Gbps +54: + duration: 1.0854 sec + algo throughput: 58964768860.6620 bps, 58.9648 Gbps + busbw: 58.0434 Gbps +ignore me 2082 +ignore me 2082 +59: + duration: 1.0857 sec +53: + duration: 1.0856 sec + algo throughput: 58950712592.1092 bps, 58.9507 Gbps + algo throughput: 58953427702.0069 bps, 58.9534 Gbps + busbw: 58.0296 Gbps + busbw: 58.0323 Gbps +ignore me 2082 +56: + duration: 1.0857 sec + algo throughput: 58945466837.5869 bps, 58.9455 Gbps + busbw: 58.0244 Gbps +ignore me 133309 +1: + duration: 1.1391 sec + algo throughput: 56186630678.4523 bps, 56.1866 Gbps + busbw: 55.3087 Gbps +ignore me 133309 +2: + duration: 1.1388 sec + algo throughput: 56197841965.9918 bps, 56.1978 Gbps + busbw: 55.3198 Gbps +ignore me 133309 +3: + duration: 1.1386 sec + algo throughput: 56210536429.9574 bps, 56.2105 Gbps + busbw: 55.3322 Gbps +ignore me 133309 +0: + duration: 1.1384 sec + algo throughput: 56218527471.7047 bps, 56.2185 Gbps + busbw: 55.3401 Gbps +ignore me 133309 +ignore me 133309 +7: + duration: 1.1408 sec +ignore me 133309 + algo throughput: 56102562666.0355 bps, 56.1026 Gbps + busbw: 55.2260 Gbps +4: + duration: 1.1403 sec + algo throughput: 56123380167.5427 bps, 56.1234 Gbps + 
busbw: 55.2465 Gbps +61: + duration: 1.1382 sec + algo throughput: 56230478369.0786 bps, 56.2305 Gbps + busbw: 55.3519 Gbps +ignore me 133309 +6: + duration: 1.1415 sec +ignore me 133309 + algo throughput: 56065940519.5571 bps, 56.0659 Gbps + busbw: 55.1899 Gbps +62: + duration: 1.1380 sec + algo throughput: 56240584961.5016 bps, 56.2406 Gbps + busbw: 55.3618 Gbps +ignore me 133309 +ignore me 133309 +5: + duration: 1.1423 sec + algo throughput: 56028055753.7944 bps, 56.0281 Gbps + busbw: 55.1526 Gbps +ignore me 133309 +63: + duration: 1.1378 sec + algo throughput: 56249756590.0532 bps, 56.2498 Gbps + busbw: 55.3709 Gbps +60: + duration: 1.1375 sec + algo throughput: 56261938000.7831 bps, 56.2619 Gbps + busbw: 55.3828 Gbps +ignore me 133309 +57: + duration: 1.1373 sec + algo throughput: 56272326070.3276 bps, 56.2723 Gbps + busbw: 55.3931 Gbps +ignore me 133309 +58: + duration: 1.1371 sec + algo throughput: 56284141411.0425 bps, 56.2841 Gbps + busbw: 55.4047 Gbps +ignore me 133309 +ignore me 133309 +59: + duration: 1.1369 sec + algo throughput: 56293245261.6948 bps, 56.2932 Gbps + busbw: 55.4137 Gbps +11: + duration: 1.1441 sec + algo throughput: 55938995618.7239 bps, 55.9390 Gbps + busbw: 55.0649 Gbps +ignore me 133309 +ignore me 133309 +10: +ignore me 133309 + duration: 1.1448 sec + algo throughput: 55906467841.9515 bps, 55.9065 Gbps + busbw: 55.0329 Gbps +ignore me 133309 +56: + duration: 1.1368 sec + algo throughput: 56297972665.7302 bps, 56.2980 Gbps + busbw: 55.4183 Gbps +8: + duration: 1.1439 sec +53: + duration: 1.1373 sec + algo throughput: 55950271462.9815 bps, 55.9503 Gbps + busbw: 55.0760 Gbps + algo throughput: 56274477937.5927 bps, 56.2745 Gbps + busbw: 55.3952 Gbps +ignore me 133309 +9: + duration: 1.1456 sec +ignore me 133309 + algo throughput: 55867699042.4541 bps, 55.8677 Gbps + busbw: 54.9948 Gbps +54: + duration: 1.1379 sec + algo throughput: 56242161571.4548 bps, 56.2422 Gbps + busbw: 55.3634 Gbps +ignore me 133309 +55: + duration: 1.1387 sec + 
algo throughput: 56203992048.2376 bps, 56.2040 Gbps + busbw: 55.3258 Gbps +ignore me 133309 +52: + duration: 1.1389 sec + algo throughput: 56196038730.8567 bps, 56.1960 Gbps + busbw: 55.3180 Gbps +ignore me 133309 +ignore me 133309 +49: + duration: 1.1401 sec + algo throughput: 56133974251.2953 bps, 56.1340 Gbps + busbw: 55.2569 Gbps +12: + duration: 1.1468 sec + algo throughput: 55806916192.1737 bps, 55.8069 Gbps + busbw: 54.9349 Gbps +ignore me 133309 +ignore me 133309 +15: + duration: 1.1474 sec + algo throughput: 55778094162.5050 bps, 55.7781 Gbps + busbw: 54.9066 Gbps +50: + duration: 1.1412 sec + algo throughput: 56082315409.3190 bps, 56.0823 Gbps + busbw: 55.2060 Gbps +ignore me 133309 +ignore me 133309 +14: + duration: 1.1482 sec + algo throughput: 55739916871.7653 bps, 55.7399 Gbps + busbw: 54.8690 Gbps +51: + duration: 1.1424 sec + algo throughput: 56021014275.0669 bps, 56.0210 Gbps + busbw: 55.1457 Gbps +ignore me 133309 +13: + duration: 1.1487 sec + algo throughput: 55713226310.2563 bps, 55.7132 Gbps + busbw: 54.8427 Gbps +ignore me 133309 +48: + duration: 1.1431 sec + algo throughput: 55990240878.8492 bps, 55.9902 Gbps + busbw: 55.1154 Gbps +ignore me 133309 +45: + duration: 1.1445 sec + algo throughput: 55922039630.2006 bps, 55.9220 Gbps + busbw: 55.0483 Gbps +ignore me 133309 +46: + duration: 1.1452 sec + algo throughput: 55884867458.2303 bps, 55.8849 Gbps + busbw: 55.0117 Gbps +ignore me 133309 +16: + duration: 1.1498 sec + algo throughput: 55662150999.5133 bps, 55.6622 Gbps + busbw: 54.7924 Gbps +ignore me 133309 +ignore me 133309 +47: + duration: 1.1461 sec + algo throughput: 55840080608.4029 bps, 55.8401 Gbps + busbw: 54.9676 Gbps +19: + duration: 1.1504 sec + algo throughput: 55634093851.0884 bps, 55.6341 Gbps + busbw: 54.7648 Gbps +ignore me 133309 +ignore me 133309 +44: + duration: 1.1466 sec + algo throughput: 55816907945.4274 bps, 55.8169 Gbps + busbw: 54.9448 Gbps +18: + duration: 1.1511 sec +ignore me 133309 + algo throughput: 
55596946472.7115 bps, 55.5969 Gbps + busbw: 54.7282 Gbps +41: + duration: 1.1484 sec + algo throughput: 55731437832.9023 bps, 55.7314 Gbps + busbw: 54.8606 Gbps +ignore me 133309 +17: + duration: 1.1519 sec + algo throughput: 55559341074.1980 bps, 55.5593 Gbps +ignore me 133309 + busbw: 54.6912 Gbps +42: + duration: 1.1492 sec + algo throughput: 55693060179.6313 bps, 55.6931 Gbps + busbw: 54.8229 Gbps +ignore me 133309 +43: + duration: 1.1499 sec + algo throughput: 55656863113.6962 bps, 55.6569 Gbps + busbw: 54.7872 Gbps +ignore me 133309 +40: + duration: 1.1502 sec +ignore me 133309 +ignore me 133309 + algo throughput: 55643327110.3709 bps, 55.6433 Gbps + busbw: 54.7739 Gbps +20: + duration: 1.1530 sec +37: + duration: 1.1516 sec + algo throughput: 55572725416.9574 bps, 55.5727 Gbps + busbw: 54.7044 Gbps + algo throughput: 55508082457.9716 bps, 55.5081 Gbps + busbw: 54.6408 Gbps +ignore me 133309 +ignore me 133309 +23: + duration: 1.1537 sec + algo throughput: 55472482295.3113 bps, 55.4725 Gbps + busbw: 54.6057 Gbps +38: + duration: 1.1527 sec + algo throughput: 55524025660.1277 bps, 55.5240 Gbps + busbw: 54.6565 Gbps +ignore me 133309 +ignore me 133309 +22: + duration: 1.1544 sec + algo throughput: 55439647607.6852 bps, 55.4396 Gbps + busbw: 54.5734 Gbps +39: + duration: 1.1536 sec + algo throughput: 55479549449.4088 bps, 55.4795 Gbps + busbw: 54.6127 Gbps +ignore me 133309 +36: + duration: 1.1543 sec + algo throughput: 55445227348.2578 bps, 55.4452 Gbps + busbw: 54.5789 Gbps +ignore me 133309 +ignore me 133309 +33: + duration: 1.1559 sec + algo throughput: 55369285535.2113 bps, 55.3693 Gbps + busbw: 54.5041 Gbps +21: + duration: 1.1554 sec + algo throughput: 55393329563.2963 bps, 55.3933 Gbps + busbw: 54.5278 Gbps +ignore me 133309 +34: + duration: 1.1566 sec + algo throughput: 55332871761.9836 bps, 55.3329 Gbps + busbw: 54.4683 Gbps +ignore me 133309 +35: + duration: 1.1574 sec + algo throughput: 55296232496.1253 bps, 55.2962 Gbps + busbw: 54.4322 Gbps +ignore 
me 133309 +ignore me 133309 +32: + duration: 1.1580 sec + algo throughput: 55267795287.7934 bps, 55.2678 Gbps + busbw: 54.4042 Gbps +ignore me 133309 +ignore me 133309 +27: + duration: 1.1571 sec + algo throughput: 55312877568.3247 bps, 55.3129 Gbps + busbw: 54.4486 Gbps +24: + duration: 1.1567 sec + algo throughput: 55330741896.0498 bps, 55.3307 Gbps + busbw: 54.4662 Gbps +29: + duration: 1.1597 sec + algo throughput: 55186535829.2139 bps, 55.1865 Gbps + busbw: 54.3242 Gbps +ignore me 133309 +ignore me 133309 +26: + duration: 1.1577 sec +30: + duration: 1.1597 sec + algo throughput: 55280126126.6868 bps, 55.2801 Gbps + busbw: 54.4164 Gbps + algo throughput: 55187768889.9608 bps, 55.1878 Gbps + busbw: 54.3255 Gbps +ignore me 133309 +25: + duration: 1.1585 sec + algo throughput: 55243535561.7803 bps, 55.2435 Gbps +ignore me 133309 + busbw: 54.3804 Gbps +31: + duration: 1.1595 sec + algo throughput: 55195885322.0013 bps, 55.1959 Gbps + busbw: 54.3334 Gbps +ignore me 133309 +28: + duration: 1.1593 sec + algo throughput: 55206334459.6821 bps, 55.2063 Gbps + busbw: 54.3437 Gbps +ignore me 8531839 +29: + duration: 1.0795 sec + algo throughput: 59286911481.4458 bps, 59.2869 Gbps + busbw: 58.3606 Gbps +ignore me 8531839 +30: + duration: 1.0795 sec + algo throughput: 59286408408.9583 bps, 59.2864 Gbps + busbw: 58.3601 Gbps +ignore me 8531839 +31: + duration: 1.0797 sec + algo throughput: 59278191565.0471 bps, 59.2782 Gbps + busbw: 58.3520 Gbps +ignore me 8531839 +28: + duration: 1.0799 sec +ignore me 8531839 + algo throughput: 59263025257.0778 bps, 59.2630 Gbps + busbw: 58.3370 Gbps +32: + duration: 1.0810 sec + algo throughput: 59205441780.3854 bps, 59.2054 Gbps + busbw: 58.2804 Gbps +ignore me 8531839 +35: + duration: 1.0813 sec + algo throughput: 59185679740.7383 bps, 59.1857 Gbps + busbw: 58.2609 Gbps +ignore me 8531839 +ignore me 8531839 +25: + duration: 1.0807 sec +34: + duration: 1.0818 sec + algo throughput: 59158778835.9291 bps, 59.1588 Gbps + busbw: 58.2344 Gbps + 
algo throughput: 59219009427.4692 bps, 59.2190 Gbps + busbw: 58.2937 Gbps +ignore me 8531839 +26: + duration: 1.0812 sec + algo throughput: 59192899586.7344 bps, 59.1929 Gbps + busbw: 58.2680 Gbps +ignore me 8531839 +33: + duration: 1.0824 sec + algo throughput: 59127256274.3892 bps, 59.1273 Gbps + busbw: 58.2034 Gbps +ignore me 8531839 +27: + duration: 1.0817 sec + algo throughput: 59167287492.5382 bps, 59.1673 Gbps + busbw: 58.2428 Gbps +ignore me 8531839 +24: + duration: 1.0818 sec + algo throughput: 59159151072.6827 bps, 59.1592 Gbps + busbw: 58.2348 Gbps +ignore me 8531839 +21: + duration: 1.0829 sec + algo throughput: 59099415307.5828 bps, 59.0994 Gbps + busbw: 58.1760 Gbps +ignore me 8531839 +36: + duration: 1.0833 sec + algo throughput: 59079977681.5775 bps, 59.0800 Gbps + busbw: 58.1569 Gbps +ignore me 8531839 +ignore me 8531839 +22: + duration: 1.0837 sec + algo throughput: 59055663121.6989 bps, 59.0557 Gbps + busbw: 58.1329 Gbps +39: + duration: 1.0837 sec + algo throughput: 59055189417.1339 bps, 59.0552 Gbps + busbw: 58.1325 Gbps +ignore me 8531839 +ignore me 8531839 +23: + duration: 1.0842 sec + algo throughput: 59030011445.3982 bps, 59.0300 Gbps + busbw: 58.1077 Gbps +38: + duration: 1.0842 sec + algo throughput: 59028259984.3437 bps, 59.0283 Gbps + busbw: 58.1059 Gbps +ignore me 8531839 +20: + duration: 1.0847 sec + algo throughput: 59004785777.9222 bps, 59.0048 Gbps + busbw: 58.0828 Gbps +ignore me 8531839 +17: + duration: 1.0855 sec + algo throughput: 58958978304.9890 bps, 58.9590 Gbps + busbw: 58.0377 Gbps +ignore me 8531839 +37: + duration: 1.0849 sec +ignore me 8531839 + algo throughput: 58991461897.3442 bps, 58.9915 Gbps + busbw: 58.0697 Gbps +18: + duration: 1.0860 sec + algo throughput: 58929937869.1926 bps, 58.9299 Gbps + busbw: 58.0092 Gbps +ignore me 8531839 +19: + duration: 1.0866 sec + algo throughput: 58900757003.1444 bps, 58.9008 Gbps + busbw: 57.9804 Gbps +ignore me 8531839 +ignore me 8531839 +ignore me 8531839 +16: + duration: 1.0870 
sec + algo throughput: 58880195126.7510 bps, 58.8802 Gbps + busbw: 57.9602 Gbps +13: + duration: 1.0876 sec +40: + duration: 1.0857 sec + algo throughput: 58842732172.8244 bps, 58.8427 Gbps + busbw: 57.9233 Gbps + algo throughput: 58945852979.2074 bps, 58.9459 Gbps + busbw: 58.0248 Gbps +ignore me 8531839 +ignore me 8531839 +43: + duration: 1.0861 sec +14: + duration: 1.0880 sec + algo throughput: 58927187378.4591 bps, 58.9272 Gbps + busbw: 58.0065 Gbps + algo throughput: 58820897723.0911 bps, 58.8209 Gbps + busbw: 57.9018 Gbps +ignore me 8531839 +42: + duration: 1.0866 sec + algo throughput: 58901245146.1473 bps, 58.9012 Gbps + busbw: 57.9809 Gbps +ignore me 8531839 +15: + duration: 1.0886 sec + algo throughput: 58791413029.8789 bps, 58.7914 Gbps + busbw: 57.8728 Gbps +ignore me 8531839 +41: + duration: 1.0871 sec +ignore me 8531839 + algo throughput: 58871381066.5900 bps, 58.8714 Gbps + busbw: 57.9515 Gbps +12: + duration: 1.0890 sec + algo throughput: 58771752987.2678 bps, 58.7718 Gbps + busbw: 57.8534 Gbps +ignore me 8531839 +9: + duration: 1.0899 sec + algo throughput: 58719761283.1440 bps, 58.7198 Gbps + busbw: 57.8023 Gbps +ignore me 8531839 +10: + duration: 1.0905 sec + algo throughput: 58691072684.4358 bps, 58.6911 Gbps + busbw: 57.7740 Gbps +ignore me 8531839 +ignore me 8531839 +11: + duration: 1.0909 sec +44: + duration: 1.0880 sec + algo throughput: 58668924615.5916 bps, 58.6689 Gbps + busbw: 57.7522 Gbps + algo throughput: 58823636890.2922 bps, 58.8236 Gbps + busbw: 57.9045 Gbps +ignore me 8531839 +ignore me 8531839 +47: + duration: 1.0884 sec + algo throughput: 58802147709.6931 bps, 58.8021 Gbps + busbw: 57.8834 Gbps +8: + duration: 1.0909 sec + algo throughput: 58669493348.8639 bps, 58.6695 Gbps + busbw: 57.7528 Gbps +ignore me 8531839 +ignore me 8531839 +46: + duration: 1.0889 sec +5: + duration: 1.0922 sec + algo throughput: 58776404011.0439 bps, 58.7764 Gbps + busbw: 57.8580 Gbps + algo throughput: 58595292130.0957 bps, 58.5953 Gbps + busbw: 
57.6797 Gbps +ignore me 8531839 +6: + duration: 1.0927 sec + algo throughput: 58568672254.4708 bps, 58.5687 Gbps + busbw: 57.6535 Gbps +ignore me 8531839 +45: + duration: 1.0894 sec +ignore me 8531839 + algo throughput: 58746329829.1854 bps, 58.7463 Gbps + busbw: 57.8284 Gbps +7: + duration: 1.0933 sec + algo throughput: 58538489260.3194 bps, 58.5385 Gbps + busbw: 57.6238 Gbps +ignore me 8531839 +4: + duration: 1.0935 sec + algo throughput: 58528192503.3891 bps, 58.5282 Gbps + busbw: 57.6137 Gbps +ignore me 8531839 +1: + duration: 1.0945 sec + algo throughput: 58474433972.9440 bps, 58.4744 Gbps + busbw: 57.5608 Gbps +ignore me 8531839 +ignore me 8531839 +48: + duration: 1.0903 sec + algo throughput: 58701374285.9175 bps, 58.7014 Gbps + busbw: 57.7842 Gbps +2: + duration: 1.0945 sec +ignore me 8531839 + algo throughput: 58473699049.7601 bps, 58.4737 Gbps + busbw: 57.5600 Gbps +51: + duration: 1.0907 sec + algo throughput: 58677790184.1529 bps, 58.6778 Gbps + busbw: 57.7609 Gbps +ignore me 8531839 +3: + duration: 1.0946 sec + algo throughput: 58471182051.0963 bps, 58.4712 Gbps + busbw: 57.5576 Gbps +ignore me 8531839 +50: + duration: 1.0912 sec + algo throughput: 58649778797.4147 bps, 58.6498 Gbps + busbw: 57.7334 Gbps +ignore me 8531839 +0: + duration: 1.0945 sec + algo throughput: 58475366330.2289 bps, 58.4754 Gbps + busbw: 57.5617 Gbps +ignore me 8531839 +61: + duration: 1.0945 sec + algo throughput: 58475001902.2709 bps, 58.4750 Gbps + busbw: 57.5613 Gbps +ignore me 8531839 +ignore me 8531839 +49: + duration: 1.0920 sec + algo throughput: 58610507438.4729 bps, 58.6105 Gbps + busbw: 57.6947 Gbps +62: + duration: 1.0944 sec + algo throughput: 58479376830.5396 bps, 58.4794 Gbps + busbw: 57.5656 Gbps +ignore me 8531839 +ignore me 8531839 +63: + duration: 1.0943 sec + algo throughput: 58483011432.8103 bps, 58.4830 Gbps + busbw: 57.5692 Gbps +60: + duration: 1.0943 sec + algo throughput: 58482721292.0378 bps, 58.4827 Gbps + busbw: 57.5689 Gbps +ignore me 8531839 
+ignore me 8531839 +ignore me 8531839 +55: + duration: 1.0930 sec + algo throughput: 58557035155.2496 bps, 58.5570 Gbps + busbw: 57.6421 Gbps +52: +57: + duration: 1.0944 sec + duration: 1.0927 sec + algo throughput: 58569873887.8907 bps, 58.5699 Gbps + busbw: 57.6547 Gbps + algo throughput: 58481957941.1610 bps, 58.4820 Gbps + busbw: 57.5682 Gbps +ignore me 8531839 +ignore me 8531839 +58: + duration: 1.0943 sec + algo throughput: 58482942275.5710 bps, 58.4829 Gbps + busbw: 57.5691 Gbps +54: + duration: 1.0935 sec + algo throughput: 58529632382.1275 bps, 58.5296 Gbps + busbw: 57.6151 Gbps +ignore me 8531839 +ignore me 8531839 +59: + duration: 1.0943 sec +53: + duration: 1.0939 sec + algo throughput: 58484596221.1178 bps, 58.4846 Gbps + busbw: 57.5708 Gbps + algo throughput: 58504811528.0580 bps, 58.5048 Gbps + busbw: 57.5907 Gbps +ignore me 8531839 +56: + duration: 1.0943 sec + algo throughput: 58486115439.8358 bps, 58.4861 Gbps + busbw: 57.5723 Gbps +ignore me 546037696 +1: + duration: 1.1476 sec + algo throughput: 55770762346.0617 bps, 55.7708 Gbps + busbw: 54.8993 Gbps +ignore me 546037696 +2: + duration: 1.1475 sec + algo throughput: 55771017398.3502 bps, 55.7710 Gbps + busbw: 54.8996 Gbps +ignore me 546037696 +3: + duration: 1.1475 sec + algo throughput: 55771333738.8483 bps, 55.7713 Gbps + busbw: 54.8999 Gbps +ignore me 546037696 +0: + duration: 1.1476 sec + algo throughput: 55769516968.5215 bps, 55.7695 Gbps + busbw: 54.8981 Gbps +ignore me 546037696 +ignore me 546037696 +61: + duration: 1.1476 sec + algo throughput: 55770292281.9796 bps, 55.7703 Gbps + busbw: 54.8989 Gbps +ignore me 546037696 +4: + duration: 1.1487 sec + algo throughput: 55717449805.5686 bps, 55.7174 Gbps + busbw: 54.8469 Gbps +7: + duration: 1.1489 sec + algo throughput: 55705382055.5402 bps, 55.7054 Gbps + busbw: 54.8350 Gbps +ignore me 546037696 +62: + duration: 1.1476 sec +ignore me 546037696 + algo throughput: 55768766319.9799 bps, 55.7688 Gbps + busbw: 54.8974 Gbps +6: + duration: 
1.1494 sec + algo throughput: 55680938790.6285 bps, 55.6809 Gbps + busbw: 54.8109 Gbps +ignore me 546037696 +ignore me 546037696 +5: + duration: 1.1498 sec +63: + duration: 1.1477 sec + algo throughput: 55765849797.1745 bps, 55.7658 Gbps + busbw: 54.8945 Gbps + algo throughput: 55661444152.7538 bps, 55.6614 Gbps + busbw: 54.7917 Gbps +ignore me 546037696 +60: + duration: 1.1477 sec + algo throughput: 55762914009.1087 bps, 55.7629 Gbps + busbw: 54.8916 Gbps +ignore me 546037696 +57: + duration: 1.1477 sec + algo throughput: 55765430612.6848 bps, 55.7654 Gbps + busbw: 54.8941 Gbps +ignore me 546037696 +58: + duration: 1.1477 sec + algo throughput: 55763080232.6062 bps, 55.7631 Gbps + busbw: 54.8918 Gbps +ignore me 546037696 +ignore me 546037696 +ignore me 546037696 +59: + duration: 1.1477 sec +8: + duration: 1.1508 sec + algo throughput: 55762394643.4226 bps, 55.7624 Gbps + busbw: 54.8911 Gbps + algo throughput: 55611961476.5661 bps, 55.6120 Gbps + busbw: 54.7430 Gbps +11: + duration: 1.1511 sec + algo throughput: 55598461044.5628 bps, 55.5985 Gbps + busbw: 54.7297 Gbps +ignore me 546037696 +ignore me 546037696 +56: + duration: 1.1478 sec +10: + duration: 1.1515 sec + algo throughput: 55760993562.6936 bps, 55.7610 Gbps + busbw: 54.8897 Gbps + algo throughput: 55577492254.7561 bps, 55.5775 Gbps + busbw: 54.7091 Gbps +ignore me 546037696 +53: + duration: 1.1481 sec + algo throughput: 55742639371.6693 bps, 55.7426 Gbps + busbw: 54.8717 Gbps +ignore me 546037696 +ignore me 546037696 +9: + duration: 1.1521 sec + algo throughput: 55548756769.4051 bps, 55.5488 Gbps + busbw: 54.6808 Gbps +54: + duration: 1.1486 sec + algo throughput: 55722188346.6659 bps, 55.7222 Gbps + busbw: 54.8515 Gbps +ignore me 546037696 +55: + duration: 1.1491 sec + algo throughput: 55696658264.1312 bps, 55.6967 Gbps + busbw: 54.8264 Gbps +ignore me 546037696 +52: + duration: 1.1493 sec + algo throughput: 55684052078.0344 bps, 55.6841 Gbps + busbw: 54.8140 Gbps +ignore me 546037696 +ignore me 
546037696 +49: + duration: 1.1501 sec + algo throughput: 55649095693.8313 bps, 55.6491 Gbps + busbw: 54.7796 Gbps +12: + duration: 1.1530 sec + algo throughput: 55505198112.8937 bps, 55.5052 Gbps + busbw: 54.6379 Gbps +ignore me 546037696 +50: + duration: 1.1508 sec +ignore me 546037696 + algo throughput: 55613869060.5475 bps, 55.6139 Gbps + busbw: 54.7449 Gbps +ignore me 546037696 +15: + duration: 1.1535 sec + algo throughput: 55481402494.6963 bps, 55.4814 Gbps + busbw: 54.6145 Gbps +14: + duration: 1.1539 sec + algo throughput: 55463380623.3736 bps, 55.4634 Gbps + busbw: 54.5968 Gbps +ignore me 546037696 +51: + duration: 1.1513 sec + algo throughput: 55589348274.9666 bps, 55.5893 Gbps + busbw: 54.7208 Gbps +ignore me 546037696 +13: + duration: 1.1544 sec + algo throughput: 55438208761.5075 bps, 55.4382 Gbps + busbw: 54.5720 Gbps +ignore me 546037696 +48: + duration: 1.1518 sec + algo throughput: 55566775315.2157 bps, 55.5668 Gbps + busbw: 54.6985 Gbps +ignore me 546037696 +45: + duration: 1.1525 sec + algo throughput: 55529057698.1972 bps, 55.5291 Gbps + busbw: 54.6614 Gbps +ignore me 546037696 +46: + duration: 1.1531 sec + algo throughput: 55500407337.6886 bps, 55.5004 Gbps + busbw: 54.6332 Gbps +ignore me 546037696 +ignore me 546037696 +47: + duration: 1.1536 sec + algo throughput: 55476425397.6833 bps, 55.4764 Gbps + busbw: 54.6096 Gbps +16: + duration: 1.1552 sec + algo throughput: 55400712115.8729 bps, 55.4007 Gbps + busbw: 54.5351 Gbps +ignore me 546037696 +19: + duration: 1.1556 sec +ignore me 546037696 + algo throughput: 55382462428.1853 bps, 55.3825 Gbps + busbw: 54.5171 Gbps +44: + duration: 1.1540 sec + algo throughput: 55458928869.5273 bps, 55.4589 Gbps + busbw: 54.5924 Gbps +ignore me 546037696 +ignore me 546037696 +41: + duration: 1.1549 sec + algo throughput: 55413716946.1284 bps, 55.4137 Gbps + busbw: 54.5479 Gbps +18: + duration: 1.1561 sec + algo throughput: 55357170878.3853 bps, 55.3572 Gbps + busbw: 54.4922 Gbps +ignore me 546037696 +ignore me 
546037696 +17: + duration: 1.1566 sec +42: + duration: 1.1554 sec + algo throughput: 55390228161.2849 bps, 55.3902 Gbps + algo throughput: 55336032557.8372 bps, 55.3360 Gbps + busbw: 54.4714 Gbps + busbw: 54.5248 Gbps +ignore me 546037696 +43: + duration: 1.1560 sec + algo throughput: 55365107742.2023 bps, 55.3651 Gbps + busbw: 54.5000 Gbps +ignore me 546037696 +40: + duration: 1.1563 sec + algo throughput: 55350589759.8898 bps, 55.3506 Gbps + busbw: 54.4857 Gbps +ignore me 546037696 +37: + duration: 1.1571 sec + algo throughput: 55309680398.2002 bps, 55.3097 Gbps + busbw: 54.4455 Gbps +ignore me 546037696 +ignore me 546037696 +20: + duration: 1.1575 sec + algo throughput: 55290520639.7217 bps, 55.2905 Gbps + busbw: 54.4266 Gbps +38: + duration: 1.1578 sec + algo throughput: 55277908297.7533 bps, 55.2779 Gbps + busbw: 54.4142 Gbps +ignore me 546037696 +23: + duration: 1.1580 sec + algo throughput: 55268827063.0441 bps, 55.2688 Gbps + busbw: 54.4053 Gbps +ignore me 546037696 +ignore me 546037696 +39: + duration: 1.1583 sec + algo throughput: 55253175352.9890 bps, 55.2532 Gbps + busbw: 54.3898 Gbps +22: + duration: 1.1584 sec + algo throughput: 55246403812.5930 bps, 55.2464 Gbps + busbw: 54.3832 Gbps +ignore me 546037696 +36: + duration: 1.1587 sec + algo throughput: 55233935159.9718 bps, 55.2339 Gbps + busbw: 54.3709 Gbps +ignore me 546037696 +21: + duration: 1.1590 sec +ignore me 546037696 + algo throughput: 55221176178.2793 bps, 55.2212 Gbps + busbw: 54.3583 Gbps +33: + duration: 1.1596 sec + algo throughput: 55191858700.9777 bps, 55.1919 Gbps + busbw: 54.3295 Gbps +ignore me 546037696 +34: + duration: 1.1601 sec + algo throughput: 55166645593.2811 bps, 55.1666 Gbps + busbw: 54.3047 Gbps +ignore me 546037696 +ignore me 546037696 +35: + duration: 1.1606 sec + algo throughput: 55142290362.2515 bps, 55.1423 Gbps + busbw: 54.2807 Gbps +ignore me 546037696 +ignore me 546037696 +24: + duration: 1.1599 sec +32: + duration: 1.1609 sec + algo throughput: 55174945593.5325 
bps, 55.1749 Gbps + busbw: 54.3128 Gbps + algo throughput: 55127795096.6593 bps, 55.1278 Gbps + busbw: 54.2664 Gbps +29: + duration: 1.1624 sec + algo throughput: 55058666612.5252 bps, 55.0587 Gbps + busbw: 54.1984 Gbps +ignore me 546037696 +27: + duration: 1.1603 sec + algo throughput: 55155878033.7081 bps, 55.1559 Gbps + busbw: 54.2941 Gbps +ignore me 546037696 +30: + duration: 1.1624 sec + algo throughput: 55056140811.3376 bps, 55.0561 Gbps + busbw: 54.1959 Gbps +ignore me 546037696 +26: + duration: 1.1609 sec + algo throughput: 55131348773.8920 bps, 55.1313 Gbps + busbw: 54.2699 Gbps +ignore me 546037696 +ignore me 546037696 +31: + duration: 1.1623 sec + algo throughput: 55064778996.1452 bps, 55.0648 Gbps + busbw: 54.2044 Gbps +25: + duration: 1.1614 sec + algo throughput: 55107526353.1258 bps, 55.1075 Gbps + busbw: 54.2465 Gbps +ignore me 546037696 +28: + duration: 1.1620 sec + algo throughput: 55076608455.7517 bps, 55.0766 Gbps + busbw: 54.2160 Gbps diff --git a/experiments/bandwidth/n1_16gb_all_reduce_bench.txt b/experiments/bandwidth/n1_16gb_all_reduce_bench.txt new file mode 100644 index 0000000000000000000000000000000000000000..38aac7f28d1d357190fd501ab28e987a49be571a --- /dev/null +++ b/experiments/bandwidth/n1_16gb_all_reduce_bench.txt @@ -0,0 +1,264 @@ +export NCCL_DEBUG=info +python -m torch.distributed.launch --nproc_per_node=4 all_reduce_bench.py + +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +local_rank: 2 +local_rank: 3 +local_rank: 1 +local_rank: 0 +0 data size: 4.0 GB +2 data size: 4.0 GB +1 data size: 4.0 GB +3 data size: 4.0 GB +r10i4n8:38029:38029 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0> +r10i4n8:38029:38029 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r10i4n8:38029:38029 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0> +r10i4n8:38029:38029 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r10i4n8:38030:38030 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0> +r10i4n8:38030:38030 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r10i4n8:38030:38030 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0> +r10i4n8:38030:38030 [1] NCCL INFO Using network IB +r10i4n8:38032:38032 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0> +r10i4n8:38032:38032 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r10i4n8:38031:38031 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.71<0> [1]ib1:10.149.8.71<0> +r10i4n8:38031:38031 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r10i4n8:38032:38032 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0> +r10i4n8:38032:38032 [3] NCCL INFO Using network IB +r10i4n8:38031:38031 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.71<0> +r10i4n8:38031:38031 [2] NCCL INFO Using network IB +r10i4n8:38029:38066 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r10i4n8:38029:38066 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r10i4n8:38029:38066 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r10i4n8:38029:38066 
[0] NCCL INFO Channel 03/12 : 0 2 1 3 +r10i4n8:38030:38071 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r10i4n8:38032:38077 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r10i4n8:38029:38066 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r10i4n8:38031:38081 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r10i4n8:38029:38066 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r10i4n8:38029:38066 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r10i4n8:38029:38066 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r10i4n8:38029:38066 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r10i4n8:38030:38071 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r10i4n8:38029:38066 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r10i4n8:38032:38077 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r10i4n8:38029:38066 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r10i4n8:38029:38066 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r10i4n8:38031:38081 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 
3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r10i4n8:38030:38071 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff00,000fffff +r10i4n8:38032:38077 [3] NCCL INFO Setting affinity for GPU 3 to ffff,f00000ff,fff00000 +r10i4n8:38031:38081 [2] NCCL INFO Setting affinity for GPU 2 to ffff,f00000ff,fff00000 +r10i4n8:38029:38066 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r10i4n8:38029:38066 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r10i4n8:38029:38066 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff +r10i4n8:38032:38077 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] 
NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC 
+r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 
3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 
[1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r10i4n8:38031:38081 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r10i4n8:38032:38077 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r10i4n8:38029:38066 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r10i4n8:38030:38071 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r10i4n8:38030:38071 [1] NCCL INFO comm 0x14dbb0001060 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r10i4n8:38031:38081 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r10i4n8:38031:38081 [2] NCCL INFO comm 0x150950001060 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r10i4n8:38032:38077 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r10i4n8:38032:38077 [3] NCCL INFO comm 0x14ccd8001060 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r10i4n8:38029:38066 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r10i4n8:38029:38066 [0] NCCL INFO comm 0x149bac001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r10i4n8:38029:38029 [0] NCCL INFO Launch mode Parallel +ignore me 1 +ignore me 1 +ignore me 1 +0: + duration: 0.6633 sec + algo throughput: 
96488131490.3540 bps, 96.4881 Gbps + busbw: 72.3661 Gbps +1: + duration: 0.4507 sec + algo throughput: 142007505620.8443 bps, 142.0075 Gbps + busbw: 106.5056 Gbps +2: + duration: 0.4203 sec + algo throughput: 152274131784.9601 bps, 152.2741 Gbps + busbw: 114.2056 Gbps +ignore me 1 +3: + duration: 0.4225 sec + algo throughput: 151490688123.0876 bps, 151.4907 Gbps + busbw: 113.6180 Gbps +ignore me 7 +ignore me 7 +ignore me 7 +3: + duration: 0.0479 sec + algo throughput: 1336658447010.4644 bps, 1336.6584 Gbps + busbw: 1002.4938 Gbps +ignore me 7 +1: + duration: 0.0483 sec + algo throughput: 1325019685494.1951 bps, 1325.0197 Gbps + busbw: 993.7648 Gbps +0: + duration: 0.0483 sec + algo throughput: 1323924013812.1467 bps, 1323.9240 Gbps + busbw: 992.9430 Gbps +2: + duration: 0.0483 sec + algo throughput: 1324507343140.4290 bps, 1324.5073 Gbps + busbw: 993.3805 Gbps +ignore me 31 +ignore me 31 +ignore me 31 +ignore me 31 +3: + duration: 0.0479 sec + algo throughput: 1335850436641.9412 bps, 1335.8504 Gbps + busbw: 1001.8878 Gbps +2: + duration: 0.0478 sec + algo throughput: 1338717258044.6157 bps, 1338.7173 Gbps + busbw: 1004.0379 Gbps +0: + duration: 0.0479 sec + algo throughput: 1336480609710.5195 bps, 1336.4806 Gbps + busbw: 1002.3605 Gbps +1: + duration: 0.0479 sec + algo throughput: 1335644997705.6060 bps, 1335.6450 Gbps + busbw: 1001.7337 Gbps +ignore me 124 +ignore me 124 +ignore me 124 +2: + duration: 0.0479 sec + algo throughput: 1337297229056.0354 bps, 1337.2972 Gbps + busbw: 1002.9729 Gbps +0: + duration: 0.0479 sec + algo throughput: 1337048861958.8491 bps, 1337.0489 Gbps + busbw: 1002.7866 Gbps +ignore me 124 +1: + duration: 0.0479 sec + algo throughput: 1337386146372.2676 bps, 1337.3861 Gbps + busbw: 1003.0396 Gbps +3: + duration: 0.0480 sec + algo throughput: 1333613993474.4404 bps, 1333.6140 Gbps + busbw: 1000.2105 Gbps +ignore me 496 +ignore me 496 +ignore me 496 +ignore me 496 +2: + duration: 0.0481 sec + algo throughput: 1329998661494.7930 bps, 
1329.9987 Gbps + busbw: 997.4990 Gbps +3: + duration: 0.0480 sec + algo throughput: 1333082662016.4126 bps, 1333.0827 Gbps + busbw: 999.8120 Gbps +1: + duration: 0.0481 sec + algo throughput: 1330394518818.0288 bps, 1330.3945 Gbps + busbw: 997.7959 Gbps +0: + duration: 0.0481 sec + algo throughput: 1329424219916.1433 bps, 1329.4242 Gbps + busbw: 997.0682 Gbps diff --git a/experiments/bandwidth/n1_32gb_all_reduce_bench.txt b/experiments/bandwidth/n1_32gb_all_reduce_bench.txt new file mode 100644 index 0000000000000000000000000000000000000000..be0c7c00f14a5908c75dcd0d8ae019c009f24e42 --- /dev/null +++ b/experiments/bandwidth/n1_32gb_all_reduce_bench.txt @@ -0,0 +1,264 @@ +export NCCL_DEBUG=info +python -m torch.distributed.launch --nproc_per_node=4 all_reduce_bench.py + +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +local_rank: 3 +local_rank: 1 +local_rank: 0 +local_rank: 2 +0 data size: 4.0 GB +2 data size: 4.0 GB +3 data size: 4.0 GB +1 data size: 4.0 GB +r7i4n1:63120:63120 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:63120:63120 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:63120:63120 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:63120:63120 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i4n1:63123:63123 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:63121:63121 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:63123:63123 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:63121:63121 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:63121:63121 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:63123:63123 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:63121:63121 [1] NCCL INFO Using network IB +r7i4n1:63123:63123 [3] NCCL INFO Using network IB +r7i4n1:63122:63122 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.76<0> [1]ib1:10.149.0.76<0> +r7i4n1:63122:63122 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n1:63122:63122 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.76<0> +r7i4n1:63122:63122 [2] NCCL INFO Using network IB +r7i4n1:63120:63191 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i4n1:63122:63194 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n1:63121:63193 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r7i4n1:63123:63192 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n1:63120:63191 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i4n1:63122:63194 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i4n1:63121:63193 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i4n1:63120:63191 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i4n1:63123:63192 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i4n1:63122:63194 [2] NCCL INFO Setting affinity for GPU 2 to ffff,f00000ff,fff00000 +r7i4n1:63120:63191 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i4n1:63121:63193 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff00,000fffff +r7i4n1:63123:63192 [3] NCCL INFO Setting affinity for GPU 3 to ffff,f00000ff,fff00000 +r7i4n1:63120:63191 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i4n1:63122:63194 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO 
Channel 05/12 : 0 3 2 1 +r7i4n1:63120:63191 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i4n1:63120:63191 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i4n1:63120:63191 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i4n1:63120:63191 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i4n1:63120:63191 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i4n1:63120:63191 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i4n1:63120:63191 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n1:63120:63191 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i4n1:63120:63191 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff00,000fffff +r7i4n1:63123:63192 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] 
via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] 
via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] 
via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] 
via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n1:63122:63194 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n1:63123:63192 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n1:63120:63191 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n1:63121:63193 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n1:63121:63193 [1] NCCL INFO comm 0x148f80001060 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n1:63122:63194 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n1:63122:63194 [2] NCCL INFO comm 0x152f00001060 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n1:63123:63192 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n1:63120:63191 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n1:63123:63192 [3] NCCL INFO comm 0x146050001060 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n1:63120:63191 [0] NCCL INFO comm 0x14f24c001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n1:63120:63120 [0] NCCL INFO Launch mode Parallel +ignore me 2 +ignore me 2 +ignore me 2 +3: + duration: 0.6125 sec + algo throughput: 104487664227.6784 bps, 104.4877 Gbps + busbw: 78.3657 Gbps +0: + duration: 0.5584 sec + algo throughput: 114613183387.2373 bps, 114.6132 Gbps + busbw: 85.9599 Gbps 
+2: + duration: 0.5140 sec + algo throughput: 124513941981.7996 bps, 124.5139 Gbps + busbw: 93.3855 Gbps +ignore me 2 +1: + duration: 0.6245 sec + algo throughput: 102486528362.0469 bps, 102.4865 Gbps + busbw: 76.8649 Gbps +ignore me 11 +ignore me 11 +ignore me 11 +ignore me 11 +1: + duration: 0.0479 sec + algo throughput: 1337346013047.7080 bps, 1337.3460 Gbps + busbw: 1003.0095 Gbps +2: + duration: 0.0482 sec + algo throughput: 1328071705904.8621 bps, 1328.0717 Gbps + busbw: 996.0538 Gbps +3: + duration: 0.0483 sec + algo throughput: 1325052362787.1750 bps, 1325.0524 Gbps + busbw: 993.7893 Gbps +0: + duration: 0.0483 sec + algo throughput: 1325619195876.0120 bps, 1325.6192 Gbps + busbw: 994.2144 Gbps +ignore me 45 +ignore me 45 +ignore me 45 +ignore me 45 +1: + duration: 0.0485 sec + algo throughput: 1319242278750.3755 bps, 1319.2423 Gbps + busbw: 989.4317 Gbps +3: + duration: 0.0485 sec + algo throughput: 1320339103321.9136 bps, 1320.3391 Gbps + busbw: 990.2543 Gbps +2: + duration: 0.0485 sec + algo throughput: 1318722904549.9961 bps, 1318.7229 Gbps + busbw: 989.0422 Gbps +0: + duration: 0.0485 sec + algo throughput: 1320313583319.3479 bps, 1320.3136 Gbps + busbw: 990.2352 Gbps +ignore me 183 +ignore me 183 +ignore me 183 +ignore me 183 +2: + duration: 0.0484 sec + algo throughput: 1322236494553.5015 bps, 1322.2365 Gbps + busbw: 991.6774 Gbps +0: + duration: 0.0484 sec + algo throughput: 1321797181142.1807 bps, 1321.7972 Gbps + busbw: 991.3479 Gbps +1: + duration: 0.0485 sec + algo throughput: 1318282723325.4265 bps, 1318.2827 Gbps + busbw: 988.7120 Gbps +3: + duration: 0.0485 sec + algo throughput: 1320550708735.8535 bps, 1320.5507 Gbps + busbw: 990.4130 Gbps +ignore me 733 +ignore me 733 +ignore me 733 +1: + duration: 0.0483 sec + algo throughput: 1323715979433.6658 bps, 1323.7160 Gbps + busbw: 992.7870 Gbps +2: + duration: 0.0484 sec + algo throughput: 1322345035832.8503 bps, 1322.3450 Gbps + busbw: 991.7588 Gbps +ignore me 733 +3: + duration: 0.0484 sec + 
algo throughput: 1323624408929.4016 bps, 1323.6244 Gbps + busbw: 992.7183 Gbps +0: + duration: 0.0485 sec + algo throughput: 1319272113636.8833 bps, 1319.2721 Gbps + busbw: 989.4541 Gbps diff --git a/experiments/gpt2-16gb-nodes.md b/experiments/gpt2-16gb-nodes.md new file mode 100644 index 0000000000000000000000000000000000000000..d49a937c854607fa303a67eb4f3de9e506ee89be --- /dev/null +++ b/experiments/gpt2-16gb-nodes.md @@ -0,0 +1,1591 @@ +# GPT2 Experiments + +Scripts and logs of GPT2 experiments on Jean Zay HPC. + +Using 4x VT100 16GB nodes. + +For now can't really allocate many 32gb nodes so can't do any serious evaluation there. +(add `-C v100-32g` for 32gb nodes.) + +## Megatron-LM + +Constants: + +- `TP_SIZE` = tensor parallel +- `PP_SIZE` = pipeline parallel +- `DP_SIZE` = data parallel is derived automatically from `WORLD_SIZE / (TP_SIZE * PP_SIZE)` +- `WORLD_SIZE` = total number of GPUs + +According to Megatron-LM paper the highest degree of TP we can use is 4 for 4-gpu nodes - crossing nodes would slow things down a lot. So max `TP_SIZE=4`. So the full 4 gpu node is used only for tensor parallel dimension. + + +### Summary + +This section summarizes the numbers from the experiment sections below: + +**Megatron**: + +Not yet optimized with NVIDIA team! + +| GPUs | Size | Micro-BS | PP Chunks | DP | PP | Throughput | +| ---: | ---: | -------: | --------: | --: | -: | ---------: | +| 16 | 7.5B | 1 | 4 | 1 | 4 | 661ms | +| 64 | 30B | 1 | 4 | 1 | 16 | 1439ms | +| 128 | 50B | 1 | 4 | 1 | 32 | 2124ms | +| 256 | 78B | 1 | 4 | 1 | 64 | 2953ms | +| 256 | 22B | 1 | 4 | 4 | 16 | 1826ms | +| | | | | | | | + + +- `TP=4` in all of entries +- Throughput is time per iteration - to complete global batch size +- Global batch size is `micro-batch-size * pp_chunks * dp_size` +- PP chunks is the number of PP stages, so each pipeline handles `micro-batch-size * pp_chunks` + +**Megatron + Deepspeed ZeRO**: + +Not yet optimized with Deepspeed team! 
+ +| GPUs | Size | Micro-BS | PP Chunks | DP | PP | Throughput | +| ---: | ---: | -------: | --------: | --: | -: | ---------: | +| 64 | 30B | 1 | 4 | 1 | 16 | 28716ms | +| | | | | | | | + + + + +### Nodes=4 DP=1 TP=4 PP=4 + +Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources: + +``` +salloc --account=six@gpu --nodes=4 --ntasks=4 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +The biggest model we can fit with `micro-batch-size=1`: **7.5B** + +``` + +cd $six_ALL_CCFRWORK/code/megatron-lm/ + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node + +GPUS_PER_NODE=4 +NNODES=4 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=4096 +NLAYERS=36 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 + +PP_SIZE=4 +DP_SIZE=1 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +export 
LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + " + +# clear old checkpoint as it'd mismatch while we sort things out +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node + +# model size +python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')" + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + + +``` + +Stats: + +``` +iteration 50/ 1000 | consumed samples: 200 | elapsed time per iteration (ms): 661.3 | learning rate: +1.497E-04 | global batch size: 4 | lm loss: 8.238016E+00 | loss scale: 16384.0 | grad norm: 2.555 | +number of skipped iterations: 0 | number of nan iterations: 0 | time (ms) | forward-compute: 92.25 | +forward-recv: 65.68 | backward-compute: 239.82 | backward-send: 0.54 | backward-send-forward-recv: +4.29 | backward-params-all-reduce: 10.50 | backward-embedding-all-reduce: 204.76 | +optimizer-copy-to-main-grad: 4.47 | optimizer-unscale-and-check-inf: 5.68 | +optimizer-clip-main-grad: 8.56 | optimizer-copy-main-to-model-params: 4.41 | optimizer: 42.31 | +batch-generator: 2.70 +``` + + +### Nodes=16 DP=1 TP=4 PP=16 + + +Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources: + +``` +salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +The biggest model we can fit 
with `micro-batch-size=1`: barely **30B** + +(30B is not in paper's table - took 39B model and reduced NHIDDEN=7168 to overcome OOM) but it still OOM'ed after 60 steps so was a bit too much. + +``` + +cd $six_ALL_CCFRWORK/code/megatron-lm/ + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node + +GPUS_PER_NODE=4 +NNODES=16 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=7168 +NLAYERS=48 +SEQ_LEN=1024 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 + +PP_SIZE=16 +DP_SIZE=1 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ 
+ --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + " + +# clear old checkpoint as it'd mismatch while we sort things out +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + + +``` + +Stats: + + + +``` +iteration 30/ 1000 | consumed samples: 120 | elapsed time per iteration (ms): 1439.3 | learning +rate: 1.500E-04 | global batch size: 4 | lm loss: 2.667133E+01 | loss scale: 16384.0 | grad norm: +73.338 | number of skipped iterations: 1 | number of nan iterations: 0 | time (ms) | +forward-compute: 77.94 | forward-recv: 285.81 | backward-compute: 203.21 | backward-send: 0.91 | +backward-send-forward-recv: 5.44 | backward-params-all-reduce: 10.38 | +backward-embedding-all-reduce: 811.34 | optimizer-copy-to-main-grad: 4.61 | +optimizer-unscale-and-check-inf: 7.90 | optimizer-clip-main-grad: 7.91 | +optimizer-copy-main-to-model-params: 3.95 | optimizer: 43.19 | batch-generator: 2.64 +``` + +### Nodes=32 DP=1 TP=4 PP=32 + +Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources: + +``` +salloc --account=six@gpu --nodes=32 --ntasks=32 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +The biggest model we can fit with `micro-batch-size=1`: **50B** + +(50B is not in paper's table - took 76B model - had to change to nlayer=64 for it to work and reduced NHIDDEN=8192 to overcome OOM) but it still OOM'ed after 60 steps so was a bit too much. 
+ +``` +perl -le 'print( (120*402780160+8*514977792)>>20)' +50023 +``` + +``` + +cd $six_ALL_CCFRWORK/code/megatron-lm/ + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node + +GPUS_PER_NODE=4 +NNODES=32 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=8192 +NLAYERS=64 +SEQ_LEN=1024 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 + +PP_SIZE=32 +DP_SIZE=1 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + " + +# clear old 
checkpoint as it'd mismatch while we sort things out +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + + +``` + +Stats: + +``` +iteration 50/ 1000 | consumed samples: 200 | elapsed time per iteration (ms): 2124.0 | learning +rate: 1.497E-04 | global batch size: 4 | lm loss: 1.038553E+01 | loss scale: 16384.0 | grad norm: +14.954 | number of skipped iterations: 0 | number of nan iterations: 0 | time (ms) | +forward-compute: 68.08 | forward-recv: 485.51 | backward-compute: 175.50 | backward-send: 0.85 | +backward-send-forward-recv: 5.63 | backward-params-all-reduce: 9.54 | backward-embedding-all-reduce: +1321.49 | optimizer-copy-to-main-grad: 4.19 | optimizer-unscale-and-check-inf: 21.21 | +optimizer-clip-main-grad: 8.04 | optimizer-copy-main-to-model-params: 3.98 | optimizer: 56.47 | +batch-generator: 2.72 + +``` + + + + + +### Nodes=64 DP=1 TP=4 PP=64 + +Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources: + +``` +salloc --account=six@gpu --nodes=64 --ntasks=64 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +The biggest model we can fit with `micro-batch-size=1`: **78B** + +(78B is not in paper's table - took 76B model - had to change to nlayers=64 for it to work) + +``` +perl -le 'print( (248*314652160+8*454899200)>>20)' +77889 +``` + +``` + +cd $six_ALL_CCFRWORK/code/megatron-lm/ + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node + +GPUS_PER_NODE=4 +NNODES=64 + +MASTER_ADDR=$(scontrol show hostnames 
$SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=10240 +NLAYERS=64 +SEQ_LEN=1024 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 + +PP_SIZE=64 +DP_SIZE=1 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + " + +# clear old checkpoint as it'd mismatch while we sort things out +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + +``` + +Stats: + +``` +iteration 30/ 1000 | consumed samples: 120 | elapsed time per iteration (ms): 2953.3 | learning +rate: 1.500E-04 | global batch size: 4 | lm loss: 3.785040E+01 | loss scale: 16384.0 | grad norm: +47.681 | number of skipped iterations: 1 | number 
of nan iterations: 0 | time (ms) | +forward-compute: 53.67 | forward-recv: 746.59 | backward-compute: 134.74 | backward-send: 1.01 | +backward-send-forward-recv: 6.49 | backward-params-all-reduce: 8.29 | backward-embedding-all-reduce: +1964.85 | optimizer-copy-to-main-grad: 3.64 | optimizer-unscale-and-check-inf: 8.68 | +optimizer-clip-main-grad: 6.34 | optimizer-copy-main-to-model-params: 3.10 | optimizer: 36.80 | +batch-generator: 2.52 +``` + + + +### Nodes=64 DP=4 TP=4 PP=16 + +Let's try a smaller model with a larger batch size. + +Pre-allocate so that we can run experiments immediately and not wait for slurm to grant us resources: + +``` +salloc --account=six@gpu --nodes=64 --ntasks=64 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +The biggest model we can fit with `micro-batch-size=1` + D4: **22B** + +``` +perl -le 'print( (48*402780160+8*514977792)>>20)' +22366 +``` + +``` + +cd $six_ALL_CCFRWORK/code/megatron-lm/ + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-1-node + +GPUS_PER_NODE=4 +NNODES=64 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=8192 +NLAYERS=32 +SEQ_LEN=1024 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 +GAS=$PP_CHUNKS + +PP_SIZE=16 +DP_SIZE=4 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --gas $GAS \ + --lr 
0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + " + +# clear old checkpoint as it'd mismatch while we sort things out +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-1-node + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + + +``` + +Stats: + +``` + iteration 40/ 1000 | consumed samples: 640 | elapsed time per iteration (ms): 1826.3 | learning + rate: 1.499E-04 | global batch size: 16 | lm loss: 1.290925E+01 | loss scale: 16384.0 | grad norm: + 7.607 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) | forward-compute: 80.84 | forward-recv: 225.57 | backward-compute: 172.26 | +backward-send: 0.86 | backward-send-forward-recv: 5.76 | backward-params-all-reduce: 307.62 | +backward-embedding-all-reduce: 746.14 | optimizer-copy-to-main-grad: 4.20 | +optimizer-unscale-and-check-inf: 250.90 | optimizer-clip-main-grad: 8.06 | +optimizer-copy-main-to-model-params: 3.99 | optimizer: 286.27 | batch-generator: 2.72 + + +``` + + + + +## Megatron + Deepspeed ZeRO + +**Important**: 
`DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3` is not in sync with M-LM master - so several config args don't match. + +Status: Unoptimized + +### Nodes=16 + + +``` +salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +Todo: + +46B experiment: +NHEADS=32 +NHIDDEN=9216 +NLAYERS=48 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + + +``` + +cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3 + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-meg-ds + +GPUS_PER_NODE=4 +NNODES=16 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=7168 +NLAYERS=48 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + +MICRO_BATCH_SIZE=16 +PP_CHUNKS=4 + +PP_SIZE=16 +DP_SIZE=2 +TP_SIZE=2 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +# --micro-batch-size $MICRO_BATCH_SIZE \ +# --lr-warmup-fraction .01 \ +# --global-batch-size $GLOBAL_BATCH_SIZE +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --batch-size $MICRO_BATCH_SIZE \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --lr 1.5e-4 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --warmup 0.01 \ + --fp16 \ + " + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +#ZeRO Configs +gradient_accumulation_steps=1 +reduce_bucket_size=$(($NHIDDEN*$NHIDDEN)) 
+stage3_prefetch_bucket_size=$(($NHIDDEN*$NHIDDEN*9/10)) +stage3_param_persistence_threshold=$((10*$NHIDDEN)) + +# Here it is different from the other setup +train_batch_size=$(($WORLD_SIZE*$MICRO_BATCH_SIZE*$gradient_accumulation_steps)) + +config_json="./ds_zero_stage_3_config.json" + +# "train_batch_size": $train_batch_size, + +cat <<EOT > $config_json +{ + "gradient_accumulation_steps": $gradient_accumulation_steps, + "steps_per_print": 10, + "zero_optimization": { + "stage": 3, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_prefetch_bucket_size": $stage3_prefetch_bucket_size, + "stage3_param_persistence_threshold": $stage3_param_persistence_threshold, + "reduce_bucket_size": $reduce_bucket_size, + "contiguous_gradients": true + }, + "gradient_clipping": 1.0, + "fp16": { + "enabled": true, + "loss_scale": 0, + "initial_scale_power": 10, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "wall_clock_breakdown": false, + "zero_allow_untested_optimizer": false +} +EOT + +MP_SIZE=$TP_SIZE + +stage=3 +reduce_scatter=true +contigious_gradients=true +rbs=50000000 +agbs=5000000000 + +#Activation Checkpointing and Contigious Memory +chkp_layers=1 +PA=true +PA_CPU=true +CC=true +SYNCHRONIZE=true +PROFILE=false + +# TiledLinear splits, 0 is disable +TILED_LINEAR="false" +TILE_DIM=1 + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${stage} \ + --zero-reduce-bucket-size ${rbs} \ + --zero-allgather-bucket-size ${agbs} \ + " + +if [ "${contigious_gradients}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-contigious-gradients" +fi + +if [ "${reduce_scatter}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-reduce-scatter" +fi + +CHKP_ARGS=" \ +--checkpoint-activations \ +--deepspeed-activation-checkpointing \ +--checkpoint-num-layers ${chkp_layers}" + +if [ "${PA}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} --partition-activations" +fi + +if [ 
"${PA_CPU}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --checkpoint-in-cpu" +fi + +if [ "${SYNCHRONIZE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --synchronize-each-layer" +fi + +if [ "${CC}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --contigious-checkpointing" +fi + +if [ "${PROFILE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --profile-backward" +fi + +if [ "${TILED_LINEAR}" = "true" ]; then +tile_opt="${tile_opt} \ + --memory-centric-tiled-linear \ + --tile-factor=${TILE_DIM}" +fi + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +# --tensor-model-parallel-size $TP_SIZE \ +# --pipeline-model-parallel-size $PP_SIZE \ +export CMD=" \ + `pwd`/pretrain_gpt2.py \ + --model-parallel-size $TP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + $CHKP_ARGS \ + " + +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-meg-ds + +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + +``` + +Stats: + +``` +iteration 20/ 1000 | elapsed time per iteration (ms): 28716.0 | learning rate: 1.500E-04 | lm loss: +2.324108E+01 | loss scale: 1024.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) | forward: 5495.35 | backward: 22976.72 | backward-backward: 22976.69 | +backward-allreduce: 0.00 | optimizer: 243.03 | batch generator: 1.00 Effective Tera Flops per GPU: +0.21 and total parameters 29.998 B +``` + + +## Megatron + Deepspeed 3D Parallelism + +**Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism` is not in sync with M-LM master - so several config args don't match. 
+ +Status: Unoptimized + +### Nodes=16 + + +``` +salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + + +``` + +cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/gpt2-meg-ds + +GPUS_PER_NODE=4 +NNODES=16 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 +NODE_RANK=0 + +NHEADS=32 +NHIDDEN=7168 +NLAYERS=48 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + +MICRO_BATCH_SIZE=1 +PP_CHUNKS=4 +GAS=$PP_CHUNKS + +PP_SIZE=16 +DP_SIZE=1 +TP_SIZE=4 + +GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$PP_CHUNKS*$DP_SIZE)) +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +# --micro-batch-size $MICRO_BATCH_SIZE \ +# --lr-warmup-fraction .01 \ +# --global-batch-size $GLOBAL_BATCH_SIZE +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --batch-size $MICRO_BATCH_SIZE \ + --gas $GAS \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --lr 1.5e-4 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --warmup 0.01 \ + --fp16 \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +#ZeRO Configs +gradient_accumulation_steps=1 +reduce_bucket_size=$(($NHIDDEN*$NHIDDEN)) +stage3_prefetch_bucket_size=$(($NHIDDEN*$NHIDDEN*9/10)) +stage3_param_persistence_threshold=$((10*$NHIDDEN)) 
+train_batch_size=$(($DP_SIZE*$MICRO_BATCH_SIZE*$gradient_accumulation_steps)) + +config_json="./ds_config.json" + +cat <<EOT > $config_json +{ + "train_batch_size": $train_batch_size, + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "gradient_accumulation_steps": $gradient_accumulation_steps, + "steps_per_print": 10, + "gradient_clipping": 1.0, + "fp16": { + "enabled": true, + "loss_scale": 0, + "initial_scale_power": 10, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "wall_clock_breakdown": false, + "zero_allow_untested_optimizer": false +} +EOT + +MP_SIZE=$TP_SIZE + +stage=0 +reduce_scatter=true +contigious_gradients=true +rbs=50000000 +agbs=5000000000 + +#Activation Checkpointing and Contigious Memory +chkp_layers=1 +PA=true +PA_CPU=false +CC=true +SYNCHRONIZE=true +PROFILE=false + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${stage} \ + --zero-reduce-bucket-size ${rbs} \ + --zero-allgather-bucket-size ${agbs} \ + " + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${stage} \ + --zero-reduce-bucket-size ${rbs} \ + --zero-allgather-bucket-size ${agbs} \ + " + +if [ "${contigious_gradients}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-contigious-gradients" +fi + +if [ "${reduce_scatter}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-reduce-scatter" +fi + +CHKP_ARGS=" \ +--checkpoint-activations \ +--checkpoint-num-layers ${chkp_layers}" + +if [ "${PA}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --partition-activations" +fi + +if [ "${PA_CPU}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --checkpoint-in-cpu" +fi + +if [ "${SYNCHRONIZE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --synchronize-each-layer" +fi + +if [ "${CC}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --contigious-checkpointing" +fi + +if [ "${PROFILE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --profile-backward" +fi + +export 
LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +# --tensor-model-parallel-size $TP_SIZE \ +# --pipeline-model-parallel-size $PP_SIZE \ +export CMD=" \ + `pwd`/pretrain_gpt2.py \ + --model-parallel-size $TP_SIZE \ + --pipe-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + $CHKP_ARGS \ + " + +rm -rf $six_ALL_CCFRWORK/checkpoints/gpt2-meg-ds + +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + +# can't figure out how to launch from salloc +# +# r10i5n[5-6],r10i6n[4-5,7-8],r10i7n[0,4-5],r11i3n[3-6],r13i1n[2-4] +function makehostfile() { +perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"}; +$slots=4 if $slots==0; # workaround +while ($ENV{"SLURM_JOB_NODELIST"} =~ m/(\w+)(?:\[([\d-,]+)\])?,?/msg) { +$b=$1; $s=$2||q[""]; $s=~s/-/../g; +print map { "$b$_ slots=$slots\n" } eval $s }' +} +makehostfile > hostfile +# +# +# srun --jobid $SLURM_JOBID deepspeed -H `pwd`/hostfile --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} $CMD +# + +# to kill hanging python processes on all nodes at once +# srun pkill python + +``` + +Stats: +``` +iteration 650/ 1000 | elapsed time per iteration (ms): 1210.1 | learning rate: 1.450E-05 | lm loss: +7.287670E+00 | loss scale: 8192.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) | forward: 0.00 | backward: 0.00 | optimizer: 0.00 | batch generator: 0.00 + +``` + +``` +| N/A 50C P0 181W / 300W | 13236MiB / 32510MiB | 99% Default | +| 0 N/A N/A 72371 C .../conda/hf-prod/bin/python 13233MiB | +| 1 N/A N/A 72372 C .../conda/hf-prod/bin/python 13193MiB | +| 2 N/A N/A 72373 C .../conda/hf-prod/bin/python 13161MiB | +| 3 N/A N/A 72374 C .../conda/hf-prod/bin/python 13169MiB | 
+``` + +## HF + Deepspeed ZeRO + +### Nodes=16 ZeRO-2 + + +``` +salloc --account=six@gpu --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +32GB nodes + +This works - at about 25GB / gpus - very slow 20s/it + +Model size: 3.5B + +Higher model the 40GB/gpu limit is passed and processes get killed. + +We don't have zero.Init() here so the whole model is loaded onto each process - not possible to scale. + +This memory gets released afterwards, but we don't have enough to bypass that hump. + +``` + +# use custom PR branch to handle the model creation on the fly +cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/ + +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m +DATASET="stas/openwebtext-10k" + +GPUS_PER_NODE=4 +NNODES=16 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +NHEADS=32 +NHIDDEN=3072 +NLAYERS=30 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + + +config_json="./ds_z2_no_offload.json" +cat < $config_json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 2e8, + "overlap_comm": true, + 
"reduce_scatter": true, + "reduce_bucket_size": 2e8, + "contiguous_gradients": true, + "cpu_offload": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +EOT + +export PYTHONPATH=src +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +export USE_TF=0 + +# deepspeed -H `pwd`/hostfile-exp2 --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \ +export CMD=" \ + examples/pytorch/language-modeling/run_clm.py \ + --model_type gpt2 \ + --tokenizer_name gpt2 \ + --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 10000 \ + --max_eval_samples 1000 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --fp16 \ + --report_to none \ + --deepspeed $config_json \ + " + +# model size +python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')" + +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + + +``` + +Stats: + +``` + + +``` + + +## Node16 ZeRO-3 + CPU Offload + +32GB nodes + +Model size: 7B + + +``` + +# use custom PR branch to handle the model creation on the fly +cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/ + +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m +DATASET="stas/openwebtext-10k" + +GPUS_PER_NODE=4 +NNODES=2 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +NHEADS=32 +NHIDDEN=1024 +NLAYERS=10 +SEQ_LEN=1024 
+VOCAB_SIZE=50257 + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + + +config_json="./ds_z3_cpu_offload.json" +cat < $config_json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + "initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e14, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_fp16_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +EOT + +export PYTHONPATH=src +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +export USE_TF=0 + +# deepspeed -H `pwd`/hostfile-exp2 --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \ +export CMD=" \ + examples/pytorch/language-modeling/run_clm.py \ + --model_type gpt2 \ + --tokenizer_name gpt2 \ + --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 10000 \ + --max_eval_samples 1000 \ 
+ --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --fp16 \ + --report_to none \ + --deepspeed $config_json \ + " + +# model size +python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')" + +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' + +``` + + +Stats: + +``` + +``` + + +### Trying deepspeed launcher again + + +``` + +#!/bin/bash +#SBATCH --job-name=hf_ds_gpt2_multi_node_test +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@gpu + +# use custom PR branch to handle the model creation on the fly +cd $six_ALL_CCFRWORK/code/transformers-clm-any-model-config/ + +source $six_ALL_CCFRWORK/start-prod + +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m +DATASET="stas/openwebtext-10k" + +GPUS_PER_NODE=4 +NNODES=2 + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +NHEADS=32 +NHIDDEN=1024 +NLAYERS=10 +SEQ_LEN=1024 +VOCAB_SIZE=50257 + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + + +config_json="./ds_z3_cpu_offload.json" +cat <<EOT > $config_json +{ + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "loss_scale_window": 1000, + 
"initial_scale_power": 16, + "hysteresis": 2, + "min_loss_scale": 1 + }, + + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": "auto", + "eps": "auto", + "weight_decay": "auto" + } + }, + + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": "auto", + "warmup_max_lr": "auto", + "warmup_num_steps": "auto" + } + }, + + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e14, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_fp16_weights_on_model_save": true + }, + + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 2000, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} +EOT + +export PYTHONPATH=src +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +export USE_TF=0 + +export CMD=" \ + deepspeed --num_nodes $NNODES --num_gpus $GPUS_PER_NODE \ + examples/pytorch/language-modeling/run_clm.py \ + --model_type gpt2 \ + --tokenizer_name gpt2 \ + --config_overrides "n_embd=$NHIDDEN,n_head=$NHEADS,n_layer=$NLAYERS,n_positions=$SEQ_LEN" \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 10000 \ + --max_eval_samples 1000 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --fp16 \ + --report_to none \ + --deepspeed $config_json \ + " + +# model size +python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l * (12*h**2 + 13*h) + (v * h) + (s * h) ) / 10**9 :.0f}B')" + +#srun --jobid $SLURM_JOBID 
bash -c '$CMD' +srun --jobid $SLURM_JOBID bash -c '$CMD' + + + +``` diff --git a/experiments/gpt2-meg-ds-3d-old/meg_ds_3d_gpt2_perf_n16.out b/experiments/gpt2-meg-ds-3d-old/meg_ds_3d_gpt2_perf_n16.out new file mode 100644 index 0000000000000000000000000000000000000000..b160e5b341dfc1d2cdc3eb5095af4b26c8c35751 --- /dev/null +++ b/experiments/gpt2-meg-ds-3d-old/meg_ds_3d_gpt2_perf_n16.out @@ -0,0 +1,12069 @@ +**************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
+***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +***************************************** +Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +***************************************** +using world size: 64 and model-parallel size: 4 +using torch.float16 for parameters ... +-------------------- arguments -------------------- + adam_beta1 ...................... 0.9 + adam_beta2 ...................... 0.999 + adam_eps ........................ 1e-08 + adlr_autoresume ................. False + adlr_autoresume_interval ........ 1000 + apply_query_key_layer_scaling ... False + apply_residual_connection_post_layernorm False + attention_dropout ............... 
0.1 + attention_softmax_in_fp32 ....... False + batch_size ...................... 4 + bert_load ....................... None + bias_dropout_fusion ............. False + bias_gelu_fusion ................ False + block_data_path ................. None + checkpoint_activations .......... True + checkpoint_in_cpu ............... False + checkpoint_num_layers ........... 1 + clip_grad ....................... 1.0 + contigious_checkpointing ........ True + cpu_optimizer ................... False + cpu_torch_adam .................. False + data_impl ....................... mmap + data_path ....................... /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document + DDP_impl ........................ local + deepscale ....................... False + deepscale_config ................ None + deepspeed ....................... True + deepspeed_activation_checkpointing False + deepspeed_config ................ ./ds_config.json + deepspeed_mpi ................... False + distribute_checkpointed_activations False + distributed_backend ............. nccl + dynamic_loss_scale .............. True + eod_mask_loss ................... False + eval_interval ................... 100 + eval_iters ...................... 10 + exit_interval ................... None + faiss_use_gpu ................... False + finetune ........................ False + fp16 ............................ True + fp16_lm_cross_entropy ........... False + fp32_allreduce .................. False + gas ............................. 256 + hidden_dropout .................. 0.1 + hidden_size ..................... 8192 + hysteresis ...................... 2 + ict_head_size ................... None + ict_load ........................ None + indexer_batch_size .............. 128 + indexer_log_interval ............ 1000 + init_method_std ................. 0.02 + layernorm_epsilon ............... 1e-05 + lazy_mpu_init ................... None + load ............................ 
/gpfsscratch/rech/eha/commun/checkpoints/gpt2-meg-ds + local_rank ...................... 0 + log_interval .................... 1 + loss_scale ...................... None + loss_scale_window ............... 1000 + lr .............................. 0.00015 + lr_decay_iters .................. 800 + lr_decay_style .................. cosine + make_vocab_size_divisible_by .... 128 + mask_prob ....................... 0.15 + max_position_embeddings ......... 1024 + merge_file ...................... /gpfsscratch/rech/eha/commun/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release/gpt2-merges.txt + min_lr .......................... 1e-05 + min_scale ....................... 1 + mmap_warmup ..................... False + model_parallel_size ............. 4 + no_load_optim ................... False + no_load_rng ..................... False + no_save_optim ................... False + no_save_rng ..................... False + num_attention_heads ............. 32 + num_layers ...................... 64 + num_unique_layers ............... None + num_workers ..................... 2 + onnx_safe ....................... None + openai_gelu ..................... False + override_lr_scheduler ........... False + param_sharing_style ............. grouped + params_dtype .................... torch.float16 + partition_activations ........... True + pipe_parallel_size .............. 16 + profile_backward ................ False + query_in_block_prob ............. 0.1 + rank ............................ 0 + report_topk_accuracies .......... [] + reset_attention_mask ............ False + reset_position_ids .............. False + save ............................ /gpfsscratch/rech/eha/commun/checkpoints/gpt2-meg-ds + save_interval ................... 500 + scaled_masked_softmax_fusion .... False + scaled_upper_triang_masked_softmax_fusion False + seed ............................ 1234 + seq_length ...................... 1024 + short_seq_prob .................. 
0.1 + split ........................... 949,50,1 + synchronize_each_layer .......... True + tensorboard_dir ................. None + titles_data_path ................ None + tokenizer_type .................. GPT2BPETokenizer + train_iters ..................... 1000 + use_checkpoint_lr_scheduler ..... False + use_cpu_initialization .......... False + use_one_sent_docs ............... False + vocab_file ...................... /gpfsscratch/rech/eha/commun/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release/gpt2-vocab.json + warmup .......................... 0.01 + weight_decay .................... 0.01 + world_size ...................... 64 + zero_allgather_bucket_size ...... 5000000000 + zero_contigious_gradients ....... True + zero_reduce_bucket_size ......... 50000000 + zero_reduce_scatter ............. True + zero_stage ...................... 0 +---------------- end of arguments ---------------- +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +> initializing torch distributed ... 
+> initializing model parallel with size 4 +MPU DP: [0] +MPU DP: [1] +MPU DP: [2] +MPU DP: [3] +MPU DP: [4] +MPU DP: [5] +MPU DP: [6] +MPU DP: [7] +MPU DP: [8] +MPU DP: [9] +MPU DP: [10] +MPU DP: [11] +MPU DP: [12] +MPU DP: [13] +MPU DP: [14] +MPU DP: [15] +MPU DP: [16] +MPU DP: [17] +MPU DP: [18] +MPU DP: [19] +MPU DP: [20] +MPU DP: [21] +MPU DP: [22] +MPU DP: [23] +MPU DP: [24] +MPU DP: [25] +MPU DP: [26] +MPU DP: [27] +MPU DP: [28] +MPU DP: [29] +MPU DP: [30] +MPU DP: [31] +MPU DP: [32] +MPU DP: [33] +MPU DP: [34] +MPU DP: [35] +MPU DP: [36] +MPU DP: [37] +MPU DP: [38] +MPU DP: [39] +MPU DP: [40] +MPU DP: [41] +MPU DP: [42] +MPU DP: [43] +MPU DP: [44] +MPU DP: [45] +MPU DP: [46] +MPU DP: [47] +MPU DP: [48] +MPU DP: [49] +MPU DP: [50] +MPU DP: [51] +MPU DP: [52] +MPU DP: [53] +MPU DP: [54] +MPU DP: [55] +MPU DP: [56] +MPU DP: [57] +MPU DP: [58] +MPU DP: [59] +MPU DP: [60] +MPU DP: [61] +MPU DP: [62] +MPU DP: [63] +MPU PP: [0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60] +MPU PP: [1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61] +MPU PP: [2, 6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46, 50, 54, 58, 62] +MPU PP: [3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63] +MPU IO: [0, 60] +MPU MP: [0, 1, 2, 3] +MPU MP: [4, 5, 6, 7] +MPU MP: [8, 9, 10, 11] +MPU MP: [12, 13, 14, 15] +MPU MP: [16, 17, 18, 19] +MPU MP: [20, 21, 22, 23] +MPU MP: [24, 25, 26, 27] +MPU MP: [28, 29, 30, 31] +MPU MP: [32, 33, 34, 35] +MPU MP: [36, 37, 38, 39] +MPU MP: [40, 41, 42, 43] +MPU MP: [44, 45, 46, 47] +MPU MP: [48, 49, 50, 51] +MPU MP: [52, 53, 54, 55] +MPU MP: [56, 57, 58, 59] +MPU MP: [60, 61, 62, 63] +> setting random seeds to 1234 ... +> initializing model parallel cuda seeds on global rank 0, model parallel rank 0, and data parallel rank 0 with model parallel seed: 3952 and data parallel seed: 1234 +building GPT2 model ... 
+SEED_LAYERS=False BASE_SEED=1234 SEED_FN=None +Using topology: {ProcessCoord(pipe=0, data=0, model=0): 0, ProcessCoord(pipe=0, data=0, model=1): 1, ProcessCoord(pipe=0, data=0, model=2): 2, ProcessCoord(pipe=0, data=0, model=3): 3, ProcessCoord(pipe=1, data=0, model=0): 4, ProcessCoord(pipe=1, data=0, model=1): 5, ProcessCoord(pipe=1, data=0, model=2): 6, ProcessCoord(pipe=1, data=0, model=3): 7, ProcessCoord(pipe=2, data=0, model=0): 8, ProcessCoord(pipe=2, data=0, model=1): 9, ProcessCoord(pipe=2, data=0, model=2): 10, ProcessCoord(pipe=2, data=0, model=3): 11, ProcessCoord(pipe=3, data=0, model=0): 12, ProcessCoord(pipe=3, data=0, model=1): 13, ProcessCoord(pipe=3, data=0, model=2): 14, ProcessCoord(pipe=3, data=0, model=3): 15, ProcessCoord(pipe=4, data=0, model=0): 16, ProcessCoord(pipe=4, data=0, model=1): 17, ProcessCoord(pipe=4, data=0, model=2): 18, ProcessCoord(pipe=4, data=0, model=3): 19, ProcessCoord(pipe=5, data=0, model=0): 20, ProcessCoord(pipe=5, data=0, model=1): 21, ProcessCoord(pipe=5, data=0, model=2): 22, ProcessCoord(pipe=5, data=0, model=3): 23, ProcessCoord(pipe=6, data=0, model=0): 24, ProcessCoord(pipe=6, data=0, model=1): 25, ProcessCoord(pipe=6, data=0, model=2): 26, ProcessCoord(pipe=6, data=0, model=3): 27, ProcessCoord(pipe=7, data=0, model=0): 28, ProcessCoord(pipe=7, data=0, model=1): 29, ProcessCoord(pipe=7, data=0, model=2): 30, ProcessCoord(pipe=7, data=0, model=3): 31, ProcessCoord(pipe=8, data=0, model=0): 32, ProcessCoord(pipe=8, data=0, model=1): 33, ProcessCoord(pipe=8, data=0, model=2): 34, ProcessCoord(pipe=8, data=0, model=3): 35, ProcessCoord(pipe=9, data=0, model=0): 36, ProcessCoord(pipe=9, data=0, model=1): 37, ProcessCoord(pipe=9, data=0, model=2): 38, ProcessCoord(pipe=9, data=0, model=3): 39, ProcessCoord(pipe=10, data=0, model=0): 40, ProcessCoord(pipe=10, data=0, model=1): 41, ProcessCoord(pipe=10, data=0, model=2): 42, ProcessCoord(pipe=10, data=0, model=3): 43, ProcessCoord(pipe=11, data=0, model=0): 44, 
ProcessCoord(pipe=11, data=0, model=1): 45, ProcessCoord(pipe=11, data=0, model=2): 46, ProcessCoord(pipe=11, data=0, model=3): 47, ProcessCoord(pipe=12, data=0, model=0): 48, ProcessCoord(pipe=12, data=0, model=1): 49, ProcessCoord(pipe=12, data=0, model=2): 50, ProcessCoord(pipe=12, data=0, model=3): 51, ProcessCoord(pipe=13, data=0, model=0): 52, ProcessCoord(pipe=13, data=0, model=1): 53, ProcessCoord(pipe=13, data=0, model=2): 54, ProcessCoord(pipe=13, data=0, model=3): 55, ProcessCoord(pipe=14, data=0, model=0): 56, ProcessCoord(pipe=14, data=0, model=1): 57, ProcessCoord(pipe=14, data=0, model=2): 58, ProcessCoord(pipe=14, data=0, model=3): 59, ProcessCoord(pipe=15, data=0, model=0): 60, ProcessCoord(pipe=15, data=0, model=1): 61, ProcessCoord(pipe=15, data=0, model=2): 62, ProcessCoord(pipe=15, data=0, model=3): 63} +[2021-05-27 19:49:22,389] [INFO] [module.py:360:_partition_layers] Partitioning pipeline stages with method type:transformer +stage=0 layers=6 + 0: EmbeddingPipe + 1: + 2: ParallelTransformerLayerPipe + 3: ParallelTransformerLayerPipe + 4: ParallelTransformerLayerPipe + 5: ParallelTransformerLayerPipe +stage=1 layers=4 + 6: ParallelTransformerLayerPipe + 7: ParallelTransformerLayerPipe + 8: ParallelTransformerLayerPipe + 9: ParallelTransformerLayerPipe +stage=2 layers=4 + 10: ParallelTransformerLayerPipe + 11: ParallelTransformerLayerPipe + 12: ParallelTransformerLayerPipe + 13: ParallelTransformerLayerPipe +stage=3 layers=4 + 14: ParallelTransformerLayerPipe + 15: ParallelTransformerLayerPipe + 16: ParallelTransformerLayerPipe + 17: ParallelTransformerLayerPipe +stage=4 layers=4 + 18: ParallelTransformerLayerPipe + 19: ParallelTransformerLayerPipe + 20: ParallelTransformerLayerPipe + 21: ParallelTransformerLayerPipe +stage=5 layers=4 + 22: ParallelTransformerLayerPipe + 23: ParallelTransformerLayerPipe + 24: ParallelTransformerLayerPipe + 25: ParallelTransformerLayerPipe +stage=6 layers=4 + 26: ParallelTransformerLayerPipe + 27: 
ParallelTransformerLayerPipe + 28: ParallelTransformerLayerPipe + 29: ParallelTransformerLayerPipe +stage=7 layers=4 + 30: ParallelTransformerLayerPipe + 31: ParallelTransformerLayerPipe + 32: ParallelTransformerLayerPipe + 33: ParallelTransformerLayerPipe +stage=8 layers=4 + 34: ParallelTransformerLayerPipe + 35: ParallelTransformerLayerPipe + 36: ParallelTransformerLayerPipe + 37: ParallelTransformerLayerPipe +stage=9 layers=4 + 38: ParallelTransformerLayerPipe + 39: ParallelTransformerLayerPipe + 40: ParallelTransformerLayerPipe + 41: ParallelTransformerLayerPipe +stage=10 layers=4 + 42: ParallelTransformerLayerPipe + 43: ParallelTransformerLayerPipe + 44: ParallelTransformerLayerPipe + 45: ParallelTransformerLayerPipe +stage=11 layers=4 + 46: ParallelTransformerLayerPipe + 47: ParallelTransformerLayerPipe + 48: ParallelTransformerLayerPipe + 49: ParallelTransformerLayerPipe +stage=12 layers=4 + 50: ParallelTransformerLayerPipe + 51: ParallelTransformerLayerPipe + 52: ParallelTransformerLayerPipe + 53: ParallelTransformerLayerPipe +stage=13 layers=4 + 54: ParallelTransformerLayerPipe + 55: ParallelTransformerLayerPipe + 56: ParallelTransformerLayerPipe + 57: ParallelTransformerLayerPipe +stage=14 layers=4 + 58: ParallelTransformerLayerPipe + 59: ParallelTransformerLayerPipe + 60: ParallelTransformerLayerPipe + 61: ParallelTransformerLayerPipe +stage=15 layers=8 + 62: ParallelTransformerLayerPipe + 63: ParallelTransformerLayerPipe + 64: ParallelTransformerLayerPipe + 65: ParallelTransformerLayerPipe + 66: + 67: FusedLayerNorm + 68: EmbeddingPipe + 69: fp16_to_fp32 + loss: CrossEntropy + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 0: 805560320 +r6i4n5:37339:37339 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.41<0> [1]ib1:10.149.8.41<0> + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel 
rank 1: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 2: 805560320 > number of parameters on model parallel rank 3: 805560320 + + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 0: 805560320 > number of parameters on model parallel rank 1: 805560320 + + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 3: 805560320 +r6i4n5:37339:37339 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 3: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model 
parallel rank 1: 805560320 +r7i5n3:79994:79994 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.88<0> [1]ib1:10.149.0.88<0> +r6i4n5:37339:37339 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.41<0> +r6i4n5:37339:37339 [0] NCCL INFO Using network IB + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 1: 805560320 +r7i4n5:46158:46158 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.80<0> [1]ib1:10.149.0.80<0> +r7i3n0:38601:38601 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.66<0> [1]ib1:10.149.0.66<0> +NCCL version 2.7.8+cuda10.2 +r7i4n5:46161:46161 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.80<0> [1]ib1:10.149.0.80<0> +r7i1n3:4943:4943 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.51<0> [1]ib1:10.149.0.51<0> +r7i3n0:38599:38599 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.66<0> [1]ib1:10.149.0.66<0> +r9i1n5:40814:40814 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.117<0> [1]ib1:10.149.0.117<0> +r9i1n6:58315:58315 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.118<0> [1]ib1:10.149.0.118<0> +r9i1n6:58313:58313 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.118<0> [1]ib1:10.149.0.118<0> +r9i1n4:24821:24821 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.116<0> [1]ib1:10.149.0.116<0> +r6i4n5:37342:37342 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.41<0> [1]ib1:10.149.8.41<0> +r7i5n3:79994:79994 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation + > number of parameters on model parallel rank 2: 805560320 +r7i7n1:68405:68405 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.103<0> [1]ib1:10.149.0.103<0> + > number of parameters on model parallel rank 0: 805560320 +r7i7n1:68406:68406 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.103<0> [1]ib1:10.149.0.103<0> + > number of parameters on model parallel rank 3: 805560320 + > number of 
parameters on model parallel rank 2: 805560320 +r7i3n0:38601:38601 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i3n0:38598:38598 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.66<0> [1]ib1:10.149.0.66<0> +r7i5n3:79997:79997 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.88<0> [1]ib1:10.149.0.88<0> +r7i7n2:57608:57608 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.104<0> [1]ib1:10.149.0.104<0> +r7i7n2:57609:57609 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.104<0> [1]ib1:10.149.0.104<0> +r7i4n5:46158:46158 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i4n5:37341:37341 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.41<0> [1]ib1:10.149.8.41<0> +r6i4n5:37340:37340 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.8.41<0> [1]ib1:10.149.8.41<0> + > number of parameters on model parallel rank 3: 805560320 +r9i1n7:8760:8760 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.119<0> [1]ib1:10.149.0.119<0> +r8i0n3:57933:57933 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.198<0> [1]ib1:10.149.0.198<0> +r8i0n3:57935:57935 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.198<0> [1]ib1:10.149.0.198<0> +r8i0n3:57934:57934 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.198<0> [1]ib1:10.149.0.198<0> +r7i4n4:1769:1769 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.79<0> [1]ib1:10.149.0.79<0> +r7i4n4:1768:1768 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.79<0> [1]ib1:10.149.0.79<0> +r7i1n3:4943:4943 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 3: 805560320 +r9i1n5:40814:40814 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n4:24821:24821 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation + > number of parameters on model parallel rank 2: 
805560320 + > number of parameters on model parallel rank 1: 805560320 + > number of parameters on model parallel rank 0: 805560320 +r7i1n3:4941:4941 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.51<0> [1]ib1:10.149.0.51<0> +r7i2n6:892:892 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.63<0> [1]ib1:10.149.0.63<0> +r7i1n3:4944:4944 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.51<0> [1]ib1:10.149.0.51<0> +r7i2n6:893:893 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.63<0> [1]ib1:10.149.0.63<0> +r7i2n6:895:895 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.63<0> [1]ib1:10.149.0.63<0> + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 2: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 0: 805560320 + > number of parameters on model parallel rank 2: 805560320 +r7i5n3:79997:79997 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i4n5:37342:37342 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation + > number of parameters on model parallel rank 1: 805560320 +r7i7n2:57611:57611 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.104<0> [1]ib1:10.149.0.104<0> +r7i3n0:38599:38599 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n7:8760:8760 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n6:58313:58313 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n5:46161:46161 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n5:40816:40816 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.117<0> [1]ib1:10.149.0.117<0> +r7i7n1:68405:68405 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation 
+r7i5n3:79995:79995 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.88<0> [1]ib1:10.149.0.88<0> +r7i7n2:57609:57609 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n2:57608:57608 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n6:58315:58315 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i3n0:38598:38598 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i0n3:57933:57933 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i1n3:4944:4944 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n4:1769:1769 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n4:1768:1768 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i0n3:57934:57934 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i0n3:57935:57935 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n1:68406:68406 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r6i4n5:37341:37341 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n5:40816:40816 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i1n3:4941:4941 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n5:40815:40815 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.117<0> [1]ib1:10.149.0.117<0> +r6i4n5:37340:37340 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n6:58312:58312 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.118<0> [1]ib1:10.149.0.118<0> +r7i7n2:57611:57611 [3] NCCL INFO NET/Plugin : No 
plugin found (libnccl-net.so), using internal implementation +r7i4n4:1767:1767 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.79<0> [1]ib1:10.149.0.79<0> +r7i4n5:46159:46159 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.80<0> [1]ib1:10.149.0.80<0> +r8i0n3:57932:57932 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.198<0> [1]ib1:10.149.0.198<0> +r7i5n3:79995:79995 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n4:24819:24819 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.116<0> [1]ib1:10.149.0.116<0> +r9i1n4:24820:24820 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.116<0> [1]ib1:10.149.0.116<0> +r7i7n2:57610:57610 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.104<0> [1]ib1:10.149.0.104<0> +r7i5n3:79994:79994 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.88<0> +r7i2n6:893:893 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i2n6:895:895 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i2n6:892:892 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n0:55673:55673 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.102<0> [1]ib1:10.149.0.102<0> +r9i1n6:58312:58312 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i3n0:38601:38601 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.66<0> +r7i3n0:38601:38601 [3] NCCL INFO Using network IB +r7i5n3:79994:79994 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n5:40815:40815 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n5:46158:46158 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.80<0> +r7i4n5:46158:46158 [0] NCCL INFO Using network IB +r8i0n3:57932:57932 
[0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n4:1767:1767 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n0:55674:55674 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.102<0> [1]ib1:10.149.0.102<0> +r7i4n5:46159:46159 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n4:24821:24821 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.116<0> +r9i1n4:24821:24821 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i7n2:57610:57610 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i1n3:4943:4943 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.51<0> +r7i1n3:4943:4943 [2] NCCL INFO Using network IB +r7i5n3:79996:79996 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.88<0> [1]ib1:10.149.0.88<0> +NCCL version 2.7.8+cuda10.2 +r9i1n5:40814:40814 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.117<0> +r9i1n5:40814:40814 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i4n5:46161:46161 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.80<0> +r7i4n5:46161:46161 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n7:8760:8760 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.119<0> +r9i1n4:24819:24819 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n4:24820:24820 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n7:8760:8760 [0] NCCL INFO Using network IB +r7i3n0:38599:38599 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB 
[3]hfi1_1:1/IB ; OOB ib0:10.148.0.66<0> +r7i3n0:38599:38599 [1] NCCL INFO Using network IB +r6i4n5:37342:37342 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.41<0> +r6i4n5:37342:37342 [3] NCCL INFO Using network IB +r7i5n3:79997:79997 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.88<0> +r7i5n3:79997:79997 [3] NCCL INFO Using network IB +r9i1n6:58313:58313 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.118<0> +NCCL version 2.7.8+cuda10.2 +r9i1n6:58313:58313 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i3n0:38598:38598 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.66<0> +r7i3n0:38598:38598 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i7n0:55676:55676 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.102<0> [1]ib1:10.149.0.102<0> +r7i7n1:68407:68407 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.103<0> [1]ib1:10.149.0.103<0> +r9i1n5:40813:40813 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.117<0> [1]ib1:10.149.0.117<0> +r7i3n0:38600:38600 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.66<0> [1]ib1:10.149.0.66<0> +r7i4n4:1766:1766 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.79<0> [1]ib1:10.149.0.79<0> +r7i7n1:68405:68405 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.103<0> +r7i7n1:68405:68405 [1] NCCL INFO Using network IB +r7i7n0:55673:55673 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i4n5:46160:46160 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.80<0> [1]ib1:10.149.0.80<0> +r9i1n7:8763:8763 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.119<0> [1]ib1:10.149.0.119<0> +r7i1n3:4944:4944 [3] NCCL INFO NET/IB : Using 
[0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.51<0> +r9i1n5:40816:40816 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.117<0> +r9i1n5:40816:40816 [3] NCCL INFO Using network IB +r7i5n3:79996:79996 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i2n6:894:894 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.63<0> [1]ib1:10.149.0.63<0> +NCCL version 2.7.8+cuda10.2 +r9i1n6:58315:58315 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.118<0> +r9i1n6:58315:58315 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n6:58314:58314 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.118<0> [1]ib1:10.149.0.118<0> +r7i6n8:29152:29152 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.101<0> [1]ib1:10.149.0.101<0> +r7i6n8:29153:29153 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.101<0> [1]ib1:10.149.0.101<0> +r7i6n8:29154:29154 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.101<0> [1]ib1:10.149.0.101<0> +r7i6n8:29155:29155 [3] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.101<0> [1]ib1:10.149.0.101<0> +r9i1n4:24818:24818 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.116<0> [1]ib1:10.149.0.116<0> +r6i4n5:37340:37340 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.41<0> +r6i4n5:37340:37340 [1] NCCL INFO Using network IB +r6i4n5:37341:37341 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.8.41<0> +NCCL version 2.7.8+cuda10.2 +r6i4n5:37341:37341 [2] NCCL INFO Using network IB +r7i1n3:4944:4944 [3] NCCL INFO Using network IB +r7i1n3:4941:4941 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.51<0> +r7i1n3:4941:4941 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 
+r7i1n3:4942:4942 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.51<0> [1]ib1:10.149.0.51<0> +r7i7n0:55675:55675 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.102<0> [1]ib1:10.149.0.102<0> +r7i4n4:1769:1769 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.79<0> +r7i4n4:1768:1768 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.79<0> +r7i7n0:55674:55674 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n2:57611:57611 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.104<0> +r7i7n2:57609:57609 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.104<0> +NCCL version 2.7.8+cuda10.2 +r7i7n1:68404:68404 [0] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.103<0> [1]ib1:10.149.0.103<0> +r7i7n1:68406:68406 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.103<0> +r7i7n1:68406:68406 [2] NCCL INFO Using network IB +r7i7n2:57611:57611 [3] NCCL INFO Using network IB +r7i7n2:57609:57609 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i4n4:1769:1769 [3] NCCL INFO Using network IB +r7i4n4:1768:1768 [2] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r8i0n3:57933:57933 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.198<0> +r7i5n3:79995:79995 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.88<0> +r8i0n3:57933:57933 [1] NCCL INFO Using network IB +r7i4n4:1766:1766 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i3n0:38600:38600 [2] NCCL INFO NET/Plugin : No plugin found 
(libnccl-net.so), using internal implementation +r9i1n5:40813:40813 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n6:58314:58314 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n7:8763:8763 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n6:58312:58312 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.118<0> +r9i1n6:58312:58312 [0] NCCL INFO Using network IB +r7i4n5:46160:46160 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i2n6:894:894 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n7:8762:8762 [2] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.119<0> [1]ib1:10.149.0.119<0> +r9i1n7:8761:8761 [1] NCCL INFO Bootstrap : Using [0]ib0:10.148.0.119<0> [1]ib1:10.149.0.119<0> +r7i4n5:46159:46159 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.80<0> +r7i4n5:46159:46159 [1] NCCL INFO Using network IB +r7i5n3:79995:79995 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i7n2:57608:57608 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.104<0> +r7i7n2:57608:57608 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i7n2:57610:57610 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.104<0> +r9i1n5:40815:40815 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.117<0> +r9i1n5:40815:40815 [2] NCCL INFO Using network IB +r7i7n2:57610:57610 [2] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i4n4:1767:1767 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB 
[2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.79<0> +r7i4n4:1767:1767 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n4:24818:24818 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r8i0n3:57935:57935 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.198<0> +r8i0n3:57934:57934 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.198<0> +r8i0n3:57935:57935 [3] NCCL INFO Using network IB +r8i0n3:57934:57934 [2] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i1n3:4942:4942 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +NCCL version 2.7.8+cuda10.2NCCL version 2.7.8+cuda10.2 + +r8i0n3:57932:57932 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.198<0> +r8i0n3:57932:57932 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i7n0:55676:55676 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n1:68407:68407 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +NCCL version 2.7.8+cuda10.2 +r7i7n0:55675:55675 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i7n1:68404:68404 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +NCCL version 2.7.8+cuda10.2 +r7i6n8:29153:29153 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n8:29154:29154 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n8:29152:29152 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i6n8:29155:29155 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal 
implementation +r7i2n6:892:892 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.63<0> +r7i2n6:892:892 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n7:8761:8761 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r9i1n7:8762:8762 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +r7i5n3:79996:79996 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.88<0> +r7i5n3:79996:79996 [2] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n4:24820:24820 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.116<0> +r9i1n4:24819:24819 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.116<0> +r7i2n6:895:895 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.63<0> +r7i2n6:895:895 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i2n6:893:893 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.63<0> +r7i2n6:893:893 [1] NCCL INFO Using network IB +r9i1n4:24820:24820 [2] NCCL INFO Using network IB +r9i1n4:24819:24819 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r9i1n5:40813:40813 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.117<0> +r9i1n5:40813:40813 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i7n0:55673:55673 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.102<0> +r7i4n5:46160:46160 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.80<0> +r7i7n0:55673:55673 
[0] NCCL INFO Using network IB +r7i2n6:894:894 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.63<0> +r7i4n4:1766:1766 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.79<0> +r7i4n5:46160:46160 [2] NCCL INFO Using network IB +r9i1n7:8763:8763 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.119<0> +r7i2n6:894:894 [2] NCCL INFO Using network IB +r9i1n6:58314:58314 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.118<0> +r9i1n6:58314:58314 [2] NCCL INFO Using network IB +r7i4n4:1766:1766 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r9i1n7:8763:8763 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i7n0:55674:55674 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.102<0> +r7i7n0:55674:55674 [1] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i3n0:38600:38600 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.66<0> +r7i3n0:38600:38600 [2] NCCL INFO Using network IB +r7i1n3:4942:4942 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.51<0> +r9i1n4:24818:24818 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.116<0> +NCCL version 2.7.8+cuda10.2 +r7i1n3:4942:4942 [1] NCCL INFO Using network IB +r9i1n4:24818:24818 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i7n0:55675:55675 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.102<0> +r7i7n0:55676:55676 [3] NCCL INFO NET/IB : Using 
[0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.102<0> +r7i7n0:55675:55675 [2] NCCL INFO Using network IB +r7i7n0:55676:55676 [3] NCCL INFO Using network IB +r7i7n1:68407:68407 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.103<0> +r7i7n1:68407:68407 [3] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r7i7n1:68404:68404 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.103<0> +r7i7n1:68404:68404 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r9i1n7:8761:8761 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.119<0> +r9i1n7:8761:8761 [1] NCCL INFO Using network IB +r9i1n7:8762:8762 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.119<0> +r9i1n7:8762:8762 [2] NCCL INFO Using network IB +r7i6n8:29153:29153 [1] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.101<0> +r7i6n8:29155:29155 [3] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.101<0> +r7i6n8:29152:29152 [0] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.101<0> +r7i6n8:29153:29153 [1] NCCL INFO Using network IB +r7i6n8:29155:29155 [3] NCCL INFO Using network IB +r7i6n8:29152:29152 [0] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +NCCL version 2.7.8+cuda10.2 +r7i6n8:29154:29154 [2] NCCL INFO NET/IB : Using [0]hfi1_2:1/IB [1]hfi1_0:1/IB [2]hfi1_3:1/IB [3]hfi1_1:1/IB ; OOB ib0:10.148.0.101<0> +r7i6n8:29154:29154 [2] NCCL INFO Using network IB +NCCL version 2.7.8+cuda10.2 +r6i4n5:37339:37473 [0] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37339:37473 [0] NCCL INFO Channel 
01/02 : 0 1 +r9i1n7:8760:8836 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n7:8760:8836 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r6i4n5:37339:37473 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37339:37473 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37339:37473 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:8836 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:8836 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r6i4n5:37339:37473 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n7:8763:8841 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n7:8763:8841 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8763:8841 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37342:37489 [3] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37342:37489 [3] NCCL INFO Channel 01/02 : 0 1 +r6i4n5:37342:37489 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37342:37489 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37342:37489 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37340:37494 [1] NCCL INFO Channel 00/02 : 0 1 +r9i1n7:8761:8850 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n7:8761:8850 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8761:8850 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37340:37494 [1] NCCL INFO Channel 01/02 : 0 1 +r6i4n5:37340:37494 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37340:37494 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37340:37494 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8760:8836 [0] NCCL INFO Channel 00 : 1[1a000] -> 
0[1a000] [send] via NET/IB/3 +r6i4n5:37342:37489 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n7:8763:8841 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r6i4n5:37340:37494 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n7:8761:8850 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r6i4n5:37342:37489 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n7:8763:8841 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r6i4n5:37340:37494 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n7:8761:8850 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i5n3:79994:80066 [0] NCCL INFO Channel 00/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 01/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 02/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 03/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 04/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 05/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 06/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 07/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 08/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 09/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 10/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 11/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 12/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 13/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 14/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 15/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 16/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 17/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 18/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 19/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 20/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 21/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 22/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 23/32 : 0 
+r7i5n3:79994:80066 [0] NCCL INFO Channel 24/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 25/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 26/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 27/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 28/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 29/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 30/32 : 0 +r7i5n3:79994:80066 [0] NCCL INFO Channel 31/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 00/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 01/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 02/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 03/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 04/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 05/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 06/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 07/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 08/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 09/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 10/32 : 0 +r6i4n5:37341:37495 [2] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37341:37495 [2] NCCL INFO Channel 01/02 : 0 1 +r7i3n0:38601:38671 [3] NCCL INFO Channel 11/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 12/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 13/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 14/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 15/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 16/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 17/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 18/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 19/32 : 0 +r9i1n7:8762:8851 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38601:38671 [3] NCCL INFO Channel 20/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 21/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 22/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 23/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 24/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 25/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 26/32 
: 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 27/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 28/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 29/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 30/32 : 0 +r7i3n0:38601:38671 [3] NCCL INFO Channel 31/32 : 0 +r6i4n5:37341:37495 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37341:37495 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37341:37495 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8762:8851 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8762:8851 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79994:80066 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i5n3:79994:80066 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38601:38671 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i3n0:38601:38671 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40814:40889 [1] NCCL INFO Channel 00/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 01/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 00/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 01/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 02/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 03/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 02/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 03/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 00/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 01/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 04/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 05/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 06/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 07/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 08/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 09/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 10/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 11/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 12/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 13/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 14/32 : 
0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 15/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 16/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 17/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 18/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 19/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 20/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 21/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 22/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 23/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 24/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 25/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 26/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 27/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 28/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 04/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 05/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 06/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 07/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 08/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 09/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 10/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 11/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 12/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 13/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 14/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 29/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 30/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Channel 31/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 15/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 16/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 17/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 18/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 19/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 20/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 21/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 22/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 23/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Trees [0] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24821:24891 [3] NCCL INFO Channel 24/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 25/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 26/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 27/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 28/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 00/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 01/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68405:68481 [1] NCCL INFO Channel 00/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 01/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 29/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 30/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Channel 31/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24821:24891 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79997:80074 [3] NCCL INFO Channel 00/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 01/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 02/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 02/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 03/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 04/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 05/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 06/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 07/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 08/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 09/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 10/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 02/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 03/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 04/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 05/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 06/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 11/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 12/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 13/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 07/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 08/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 09/32 : 
0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 10/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 11/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 14/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 15/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 16/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 12/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 13/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 14/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 17/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 18/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 15/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 16/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 17/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 19/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 20/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 21/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 18/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 19/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 20/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 22/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 23/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 24/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 21/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 22/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 23/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 25/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 26/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 24/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 25/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 27/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 28/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 29/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 26/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 27/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 28/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 29/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 30/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Channel 31/32 : 0 +r7i4n5:46161:46236 
[3] NCCL INFO Channel 30/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO Channel 31/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 03/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 04/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 05/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 06/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 07/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i4n5:46161:46236 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i5n3:79997:80074 [3] NCCL INFO Channel 08/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 09/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 10/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 11/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 12/32 : 0 +r7i7n1:68405:68481 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46161:46236 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79997:80074 [3] NCCL INFO Channel 13/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 14/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 15/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 16/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 17/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 18/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 19/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 20/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 21/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 22/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 23/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 24/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 25/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 26/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 27/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 28/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 29/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 30/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Channel 31/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i5n3:79997:80074 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38601:38671 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i3n0:38601:38671 [3] NCCL INFO comm 0x1489a8001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79994:80066 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i5n3:79994:80066 [0] NCCL INFO comm 0x14d248001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i5n3:79995:80078 [1] NCCL INFO Channel 00/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 01/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 02/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 03/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 04/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 05/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 06/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 07/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 08/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 09/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 10/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 11/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 12/32 : 0 
+r7i5n3:79995:80078 [1] NCCL INFO Channel 13/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 14/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 15/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 16/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 17/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 18/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 19/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 20/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 21/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 22/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 23/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 24/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 25/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 26/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 27/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 28/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 29/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 30/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO Channel 31/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 00/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 01/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 02/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 03/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 04/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 05/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 06/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 07/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 08/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 09/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 10/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 11/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 12/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 13/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 14/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 15/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 16/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 17/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 18/32 : 0 +r7i7n2:57611:57695 [3] 
NCCL INFO Channel 19/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 20/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 21/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 22/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 23/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 24/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 25/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 26/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 27/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 28/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 29/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 00/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 30/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Channel 31/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 01/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 02/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 03/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 04/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 05/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 06/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 07/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 08/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 09/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 10/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 11/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 12/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 13/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 14/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 15/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 16/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 17/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 18/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 19/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 20/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 21/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 22/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 23/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 24/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 25/32 
: 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 26/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 27/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 28/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 29/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 30/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Channel 31/32 : 0 +r7i3n0:38598:38682 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i3n0:38598:38682 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37342:37489 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n7:8763:8841 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i5n3:79995:80078 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i5n3:79995:80078 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38599:38681 [1] NCCL INFO Channel 00/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 01/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 02/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 03/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n2:57611:57695 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 
+r7i1n3:4943:5014 [2] NCCL INFO Channel 02/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 03/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 04/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 05/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 06/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 07/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 08/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 09/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 10/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 11/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 12/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 13/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 00/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 01/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 02/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 14/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 15/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 16/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 17/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 18/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 19/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 20/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 21/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 22/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 23/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 24/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 25/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 26/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 27/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 28/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 29/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 30/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Channel 31/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i1n3:4943:5014 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46158:46235 [0] NCCL INFO Channel 03/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 04/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 05/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 06/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 07/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 08/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 09/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 10/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 11/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 12/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 13/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 14/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 15/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 16/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 17/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 18/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 19/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 20/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 21/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 22/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 00/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 01/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 04/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO 
Channel 05/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 06/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 07/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 08/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 09/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 10/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 11/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 23/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 24/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 12/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 13/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 14/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 15/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 16/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 17/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 18/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 19/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 25/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 26/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 27/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 20/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 21/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 22/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 23/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 28/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 29/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 30/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 24/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 25/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 26/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 27/32 : 0 +r7i4n5:46158:46235 [0] NCCL INFO Channel 31/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 28/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 29/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 30/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Channel 31/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 02/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 03/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 04/32 : 0 
+r8i0n3:57933:58020 [1] NCCL INFO Channel 05/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 06/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r8i0n3:57933:58020 [1] NCCL INFO Channel 07/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 08/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 09/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 10/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 11/32 : 0 +r7i3n0:38599:38681 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57933:58020 [1] NCCL INFO Channel 12/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 13/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 14/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 15/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 16/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 17/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 18/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 19/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 20/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 21/32 : 0 +r8i0n3:57933:58020 
[1] NCCL INFO Channel 22/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 23/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 24/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 25/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 26/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 27/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 28/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 29/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 30/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO Channel 31/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n4:24821:24891 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n6:58313:58389 [1] NCCL INFO Channel 00/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 01/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 02/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 03/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 04/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 05/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 06/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 07/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 08/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 09/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 10/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 11/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 12/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 13/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 14/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 15/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 16/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 17/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 18/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 19/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 20/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 21/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 22/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 23/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 24/32 : 0 
+r9i1n6:58313:58389 [1] NCCL INFO Channel 25/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 26/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 27/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 28/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 29/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 30/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO Channel 31/32 : 0 +r9i1n5:40814:40889 [1] NCCL INFO comm 0x145f84001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68405:68481 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n5:46158:46235 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n7:8761:8850 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i4n5:46158:46235 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57933:58020 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r8i0n3:57933:58020 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68405:68481 [1] NCCL INFO comm 0x14adb0001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1769:1900 [3] NCCL INFO Channel 00/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 01/32 : 0 +r9i1n4:24821:24891 [3] NCCL INFO comm 0x151164001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8762:8851 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1769:1900 [3] NCCL INFO Channel 02/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 03/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 04/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 05/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 06/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 07/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 08/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 09/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 10/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 11/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 12/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 13/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 14/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 15/32 : 0 +r7i4n4:1769:1900 [3] 
NCCL INFO Channel 16/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 17/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 18/32 : 0 +r6i4n5:37340:37494 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i4n4:1769:1900 [3] NCCL INFO Channel 19/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 20/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 21/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 22/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 23/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 24/32 : 0 +r7i4n5:46161:46236 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i5n3:79997:80074 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n5:40816:40896 [3] NCCL INFO Channel 00/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 01/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 02/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 03/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 04/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 05/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 06/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 07/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 08/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 09/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 10/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 11/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 12/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 13/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 14/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 15/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 16/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 17/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 18/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 19/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 20/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 00/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 01/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 02/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 
21/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 22/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 23/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 24/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 25/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 26/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 27/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 28/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 29/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 30/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Channel 31/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n5:40816:40896 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58313:58389 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n6:58313:58389 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46159:46242 [1] NCCL INFO Channel 00/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 01/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 02/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 03/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 04/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 05/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 06/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 07/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 08/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 09/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 10/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 11/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 12/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 13/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 14/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 15/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 16/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 17/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 18/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 19/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 20/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 21/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 22/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 23/32 : 0 +r7i4n5:46159:46242 [1] 
NCCL INFO Channel 24/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 25/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 26/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 27/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 28/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 00/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 01/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 02/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 29/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 30/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Channel 31/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i4n5:46159:46242 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46161:46236 [3] NCCL INFO comm 0x1546a0001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:892:979 [0] NCCL INFO Channel 03/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 04/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 05/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 06/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 07/32 : 0 +r7i2n6:892:979 [0] 
NCCL INFO Channel 08/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 09/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 10/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 11/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 12/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 13/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 14/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 15/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 16/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 17/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 18/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 19/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 20/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 21/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 22/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 00/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 23/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 24/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 25/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 26/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 27/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 28/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 29/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 30/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Channel 31/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 25/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 26/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 27/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 28/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 29/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 30/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Channel 31/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i4n4:1769:1900 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n4:24820:24907 [2] NCCL INFO Channel 00/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 01/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 02/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 00/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 01/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 02/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 03/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 04/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 05/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 06/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 07/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 08/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 09/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 10/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 11/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 12/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 13/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 03/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 04/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 05/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 06/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 07/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 14/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 15/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 16/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO 
Channel 08/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 09/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 10/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 11/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 12/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 13/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 14/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 17/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 18/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 03/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 04/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 05/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 06/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 07/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 15/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 16/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 17/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 19/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 20/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 21/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 08/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 09/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 10/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 11/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 12/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 18/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 19/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 20/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 22/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 23/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 24/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 13/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 14/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 15/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 21/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 22/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 25/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 26/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 27/32 : 0 
+r7i7n1:68406:68482 [2] NCCL INFO Channel 16/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 17/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 18/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 01/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 02/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 03/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 28/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 29/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 30/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 19/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 20/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 21/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 04/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 05/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 06/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 07/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 08/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Channel 31/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 22/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 23/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 24/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 00/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 01/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 09/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 10/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 11/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 12/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n1:68406:68482 [2] NCCL INFO Channel 25/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 26/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 13/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 14/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 15/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 16/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 17/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40815:40897 [2] NCCL INFO Channel 18/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 19/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 20/32 : 0 +r7i5n3:79997:80074 [3] NCCL INFO comm 0x151654001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40815:40897 [2] NCCL INFO Channel 21/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 22/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 23/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 24/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 25/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 26/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 27/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 28/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 29/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 30/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Channel 31/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n5:40815:40897 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38600:38688 [2] NCCL INFO Channel 02/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 03/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 04/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 05/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 06/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 07/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 08/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 09/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 10/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 11/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 12/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 13/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 14/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 15/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 16/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 17/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 18/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 19/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 20/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 21/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 22/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 23/32 : 0 
+r7i3n0:38600:38688 [2] NCCL INFO Channel 24/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 25/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 26/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 27/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 28/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 29/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 30/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO Channel 31/32 : 0 +r7i1n3:4943:5014 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4943:5014 [2] NCCL INFO comm 0x153440001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i1n3:4944:5025 [3] NCCL INFO Channel 00/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 01/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 02/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 03/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 04/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 05/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 06/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 07/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 08/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 09/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 10/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 11/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 12/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 13/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 14/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 15/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 16/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 17/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 18/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 19/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 20/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 21/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 22/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 23/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 24/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 25/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 26/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO 
Channel 27/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 28/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 29/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 30/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Channel 31/32 : 0 +r7i1n3:4944:5025 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i1n3:4944:5025 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79995:80078 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4941:5026 [0] NCCL INFO Channel 00/32 : 0 +r6i4n5:37341:37495 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i4n5:46160:46248 [2] NCCL INFO Channel 00/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 01/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 02/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 03/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 04/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 05/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 06/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 07/32 : 0 +r7i4n5:46160:46248 [2] NCCL 
INFO Channel 08/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 00/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 09/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 10/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 11/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 12/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 13/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 14/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 15/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 16/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 17/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 18/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 19/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 20/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 21/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 22/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 23/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 24/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 25/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 26/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 27/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 28/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 29/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 30/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 01/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 02/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 03/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Channel 31/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 04/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 05/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 06/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n2:57609:57696 [1] NCCL INFO Channel 07/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 08/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57609:57696 [1] NCCL INFO Channel 09/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 10/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 11/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 00/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 01/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 12/32 : 0 +r7i7n2:57611:57695 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n2:57609:57696 [1] NCCL INFO Channel 13/32 : 0 +r7i5n3:79995:80078 [1] NCCL INFO comm 0x150924001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57609:57696 [1] NCCL INFO Channel 14/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 15/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 16/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 17/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 18/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 19/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 20/32 : 0 +r7i2n6:892:979 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/-1 +r7i2n6:892:979 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24820:24907 [2] NCCL INFO Channel 23/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 24/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 25/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 27/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 28/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 29/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 26/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 27/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 28/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 30/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Channel 31/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 29/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 30/32 : 0 +r9i1n4:24820:24907 [2] NCCL INFO Channel 31/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24820:24907 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n1:68406:68482 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24820:24907 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40813:40903 [0] NCCL INFO Channel 02/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 03/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 04/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 
05/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 06/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 07/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 08/32 : 0 +r8i0n3:57933:58020 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n5:40813:40903 [0] NCCL INFO Channel 09/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 10/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 11/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 12/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 13/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 14/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 15/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 16/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 17/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 18/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 19/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 20/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 21/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 22/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 23/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 24/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 25/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 26/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 27/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 28/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 29/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 30/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Channel 31/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n5:40813:40903 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38600:38688 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i4n5:46158:46235 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i3n0:38600:38688 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38598:38682 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i3n0:38599:38681 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i3n0:38598:38682 [0] NCCL INFO 
comm 0x14ee7c001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38599:38681 [1] NCCL INFO comm 0x14dda0001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57933:58020 [1] NCCL INFO comm 0x150af4001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i1n3:4941:5026 [0] NCCL INFO Channel 01/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 02/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 03/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 04/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 05/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 06/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 07/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 08/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 09/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 10/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 11/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 12/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 13/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 14/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 15/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 16/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 17/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 18/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 19/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 20/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 21/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 22/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 23/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 24/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 25/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 26/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 27/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 28/32 : 0 +r9i1n6:58313:58389 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4941:5026 [0] NCCL INFO Channel 29/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 30/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Channel 31/32 : 0 +r7i1n3:4941:5026 [0] NCCL INFO Trees [0] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i1n3:4941:5026 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46158:46235 [0] NCCL INFO comm 0x14ee3c001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57609:57696 [1] NCCL INFO Channel 21/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 22/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 23/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 24/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 25/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 26/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 27/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 28/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 29/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 30/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Channel 31/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n2:57609:57696 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57611:57695 [3] NCCL INFO comm 0x150660001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57608:57697 [0] NCCL INFO Channel 00/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 01/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 02/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 03/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 04/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 05/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 06/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 07/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 08/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 09/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 10/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 11/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 12/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 13/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 14/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 15/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 16/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 17/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 18/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 19/32 : 0 
+r7i7n2:57608:57697 [0] NCCL INFO Channel 20/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 21/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 22/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 23/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 24/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 25/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 26/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 27/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 28/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 29/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 30/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Channel 31/32 : 0 +r7i7n2:57608:57697 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n2:57608:57697 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5032 [1] NCCL INFO Channel 00/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 01/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 02/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 03/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 04/32 : 0 +r6i4n5:37342:37489 [3] NCCL 
INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n6:58313:58389 [1] NCCL INFO comm 0x150c30001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40816:40896 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1768:1901 [2] NCCL INFO Channel 00/32 : 0 +r9i1n7:8763:8841 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r8i0n3:57934:58022 [2] NCCL INFO Channel 00/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 01/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 02/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 00/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 01/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 02/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 05/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 06/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 07/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 08/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 09/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 10/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 11/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 12/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 03/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 04/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 05/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 13/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 14/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 15/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 16/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 17/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 18/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 19/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 20/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 21/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 22/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 06/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 07/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 23/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 24/32 : 0 +r7i1n3:4942:5032 [1] NCCL 
INFO Channel 25/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 26/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 27/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 28/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 08/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 09/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 10/32 : 0 +r7i2n6:892:979 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4942:5032 [1] NCCL INFO Channel 29/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 30/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Channel 31/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 11/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 12/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n6:58315:58392 [3] NCCL INFO Channel 00/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 13/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 14/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 15/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 16/32 : 0 +r7i1n3:4942:5032 [1] NCCL INFO Setting 
affinity for GPU 1 to 0fffff +r9i1n6:58315:58392 [3] NCCL INFO Channel 01/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 02/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 17/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 18/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 03/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 04/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 05/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 06/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 19/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 20/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 21/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 07/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 08/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 22/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 23/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 24/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 09/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 10/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 11/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 25/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 26/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 27/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 12/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 13/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 28/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 29/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 14/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 15/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 16/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 30/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Channel 31/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n6:58315:58392 [3] NCCL INFO Channel 17/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 18/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 19/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n6:58315:58392 [3] NCCL INFO Channel 20/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 21/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58315:58392 [3] NCCL INFO Channel 22/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 23/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 24/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 25/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 26/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 27/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 28/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 29/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 30/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Channel 31/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n6:58315:58392 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58312:58396 [0] NCCL INFO Channel 00/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 01/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 02/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 03/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 04/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 05/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 06/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 07/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 08/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 01/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 02/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 03/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 09/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 10/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 11/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 04/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 05/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 06/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 07/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 12/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 13/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 14/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 08/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 09/32 : 0 
+r9i1n6:58312:58396 [0] NCCL INFO Channel 15/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 16/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 17/32 : 0 +r9i1n5:40816:40896 [3] NCCL INFO comm 0x152d8c001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1768:1901 [2] NCCL INFO Channel 10/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 11/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 12/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 18/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 19/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 20/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 13/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 14/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 15/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 21/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 22/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 16/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 17/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 23/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 24/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 25/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 26/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 27/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 18/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 19/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 28/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 29/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 30/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Channel 31/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 20/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 21/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 22/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i4n4:1768:1901 [2] NCCL INFO Channel 23/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 24/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 25/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n4:1768:1901 [2] NCCL INFO Channel 26/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 27/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 28/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 29/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 30/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Channel 31/32 : 0 +r7i4n4:1768:1901 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i4n4:1768:1901 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1769:1900 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68406:68482 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n7:8761:8850 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r8i0n3:57934:58022 [2] NCCL INFO Channel 03/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 04/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 05/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 06/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 07/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 08/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 09/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 10/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 11/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 12/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 13/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 14/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 15/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 16/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 17/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 18/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 19/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 20/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 21/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 22/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 23/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 24/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 25/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 26/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 27/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 28/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 29/32 : 
0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 30/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Channel 31/32 : 0 +r8i0n3:57934:58022 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24820:24907 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r8i0n3:57934:58022 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4944:5025 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4944:5025 [3] NCCL INFO comm 0x1454e8001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:892:979 [0] NCCL INFO comm 0x151ecc001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i2n6:893:981 [1] NCCL INFO Channel 00/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 01/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 02/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 03/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 04/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 05/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 06/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 07/32 : 0 
+r7i2n6:893:981 [1] NCCL INFO Channel 08/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 09/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 10/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 11/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 12/32 : 0 +r7i4n5:46159:46242 [1] NCCL INFO comm 0x14e138001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:893:981 [1] NCCL INFO Channel 13/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 14/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 15/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 16/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 17/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 18/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 19/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 20/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 21/32 : 0 +r7i4n4:1769:1900 [3] NCCL INFO comm 0x14a1a0001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:893:981 [1] NCCL INFO Channel 22/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 23/32 : 0 +r7i5n3:79996:80084 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1767:1902 [1] NCCL INFO Channel 00/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 01/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 02/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 24/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 25/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 26/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 03/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 04/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 05/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 06/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 00/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 01/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 27/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 28/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1767:1902 [1] NCCL INFO Channel 07/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 08/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO 
Channel 09/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 10/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 02/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 03/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 29/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 30/32 : 0 +r9i1n5:40815:40897 [2] NCCL INFO comm 0x15114c001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1767:1902 [1] NCCL INFO Channel 11/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 12/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 04/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 05/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 06/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Channel 31/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 13/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 14/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 07/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 08/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/-1 +r7i4n4:1767:1902 [1] NCCL INFO Channel 15/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 
16/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 17/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 18/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 09/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 10/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 11/32 : 0 +r7i2n6:893:981 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1767:1902 [1] NCCL INFO Channel 19/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 20/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 12/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 13/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 14/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 15/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 16/32 : 0 +r6i4n5:37340:37494 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n6:58314:58402 [2] NCCL INFO Channel 17/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 18/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 19/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 20/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 21/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 22/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 23/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 24/32 : 0 +r7i7n2:57609:57696 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n6:58314:58402 [2] NCCL INFO Channel 25/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 26/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 27/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 28/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 29/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 30/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Channel 31/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n6:58314:58402 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29154:29242 [2] NCCL INFO Channel 00/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 01/32 : 0 +r7i7n1:68406:68482 [2] NCCL INFO comm 0x15406c001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8762:8851 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n4:24820:24907 [2] NCCL INFO comm 0x152110001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24818:24909 [0] NCCL INFO Channel 00/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 01/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 02/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 03/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 04/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 05/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 06/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 07/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 00/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 08/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 09/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 10/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 11/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 01/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 02/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 03/32 : 0 
+r8i0n3:57932:58023 [0] NCCL INFO Channel 00/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 01/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 21/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 22/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 23/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 12/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 13/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 14/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 04/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 05/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i5n3:79996:80084 [2] NCCL INFO comm 0x14fa84001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1767:1902 [1] NCCL INFO Channel 24/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 25/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 26/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 15/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 16/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 06/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 07/32 : 0 +r7i4n5:46160:46248 [2] NCCL INFO comm 0x14aa04001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1767:1902 [1] NCCL INFO Channel 27/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 28/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 29/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 17/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 18/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 08/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 09/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1767:1902 [1] NCCL INFO Channel 30/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Channel 31/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 19/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 20/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 21/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 10/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 11/32 : 0 +r9i1n5:40813:40903 [0] NCCL INFO 
comm 0x1503a0001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1767:1902 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n4:24818:24909 [0] NCCL INFO Channel 22/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 23/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 24/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 12/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 13/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 00/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1766:1908 [0] NCCL INFO Channel 01/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 02/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 03/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 04/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 05/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 06/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 25/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 26/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 14/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 15/32 : 0 +r7i4n4:1766:1908 [0] NCCL 
INFO Channel 07/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 08/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 09/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 10/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 11/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 27/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 28/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 29/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 16/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 17/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 00/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 01/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 02/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 12/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 13/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 14/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 15/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 30/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Channel 31/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 18/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 19/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 16/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 17/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 18/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 19/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i2n6:895:980 [3] NCCL INFO Channel 20/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 21/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 20/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 21/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 22/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 23/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:895:980 [3] NCCL INFO Channel 22/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 23/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 24/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 25/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 26/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 00/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 24/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 25/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 27/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 28/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 29/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 26/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 27/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 30/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Channel 31/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 28/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 29/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 
[13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r7i2n6:895:980 [3] NCCL INFO Channel 30/32 : 0 +r7i2n6:895:980 [3] NCCL INFO Channel 31/32 : 0 +r7i4n4:1766:1908 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:895:980 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/-1 +r7i2n6:895:980 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4941:5026 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n2:57609:57696 [1] NCCL INFO comm 0x14cd70001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29242 
[2] NCCL INFO Channel 02/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 03/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 04/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 05/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 06/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 07/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 08/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 09/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 10/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 11/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 12/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 13/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 14/32 : 0 +r6i4n5:37339:37473 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i6n8:29154:29242 [2] NCCL INFO Channel 15/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 16/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 17/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 18/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 19/32 : 0 +r7i3n0:38600:38688 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i6n8:29154:29242 [2] NCCL INFO Channel 20/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 21/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 22/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 23/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 24/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 25/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 26/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 27/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 28/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 29/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 30/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Channel 31/32 : 0 +r7i6n8:29154:29242 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i6n8:29154:29242 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55674:55761 [1] NCCL INFO Channel 03/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 04/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 05/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 06/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 07/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 08/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 09/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 10/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 11/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 12/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 13/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 14/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 15/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 16/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 17/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 18/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 19/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 20/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 21/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 22/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 23/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 24/32 : 0 
+r7i7n0:55674:55761 [1] NCCL INFO Channel 25/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 26/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 27/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 28/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 29/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 30/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Channel 31/32 : 0 +r7i7n0:55674:55761 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n0:55674:55761 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57932:58023 [0] NCCL INFO Channel 02/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 00/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 03/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 01/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 01/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 02/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 04/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 02/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 05/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 03/32 : 0 +r8i0n3:57932:58023 
[0] NCCL INFO Channel 06/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 04/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 07/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 05/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 03/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 04/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 08/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 06/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 09/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 07/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 10/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 08/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 11/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 09/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 05/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 06/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 07/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 08/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 12/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 10/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 13/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 11/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 14/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 12/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 15/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 09/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 10/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 11/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 13/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 16/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 14/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 17/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 15/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 18/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 16/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 12/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 13/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 19/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 
17/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 18/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 20/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 19/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 21/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 14/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 15/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 20/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 22/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 21/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 23/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 22/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 24/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 23/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 16/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 17/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 18/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 19/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 25/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 24/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 26/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 25/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 27/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 26/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 28/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 27/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 20/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 21/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 22/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 29/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 28/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 30/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 29/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Channel 31/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 30/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 23/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 24/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 25/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Channel 31/32 : 0 
+r9i1n4:24819:24908 [1] NCCL INFO Channel 26/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 27/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 28/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24819:24908 [1] NCCL INFO Channel 29/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 30/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO Channel 31/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 00/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 01/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 02/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 03/32 : 0 +r8i0n3:57935:58021 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n4:24819:24908 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i2n6:894:982 [2] NCCL INFO Channel 04/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 05/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 06/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 07/32 : 0 +r8i0n3:57932:58023 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57935:58021 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n4:24819:24908 [1] NCCL INFO Setting affinity 
for GPU 1 to 0fffff +r7i2n6:894:982 [2] NCCL INFO Channel 08/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 09/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 10/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 11/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 12/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 13/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 14/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 15/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 16/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 17/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 18/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 19/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 20/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 21/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 22/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 23/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 24/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 25/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 26/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 27/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 28/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 29/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 30/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Channel 31/32 : 0 +r7i2n6:894:982 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/-1 +r7i2n6:894:982 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37341:37495 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n2:57608:57697 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i3n0:38600:38688 [2] NCCL INFO comm 0x1455d0001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n2:57608:57697 [0] NCCL INFO comm 0x14fec4001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i1n3:4941:5026 [0] NCCL INFO comm 0x151800001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57934:58022 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4942:5032 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i1n3:4942:5032 [1] NCCL INFO comm 0x14e9b4001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n0:55673:55760 [0] NCCL INFO Channel 00/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 01/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 02/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r8i0n3:57934:58022 [2] NCCL INFO comm 0x154a4c001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1768:1901 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i2n6:893:981 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n0:55673:55760 [0] NCCL INFO Channel 03/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 04/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 05/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 06/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 07/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 08/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO 32 coll channels, 32 p2p 
channels, 32 p2p channels per peer +r7i7n0:55673:55760 [0] NCCL INFO Channel 09/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 10/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 11/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 12/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 13/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 14/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 15/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 16/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 17/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 18/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 19/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 20/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 21/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 22/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 23/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 24/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 25/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 26/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 27/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 28/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 29/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 30/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Channel 31/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n0:55673:55760 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58314:58402 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1768:1901 [2] NCCL INFO comm 0x153bc8001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:893:981 [1] NCCL INFO comm 0x153680001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29152:29240 [0] NCCL INFO Channel 00/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 01/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 02/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 03/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 04/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 05/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 06/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 07/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 08/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 09/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 10/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 11/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 12/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 13/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 14/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 15/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 16/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 17/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 18/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 19/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 20/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 21/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 22/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 23/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 24/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO 
Channel 25/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 26/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 27/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 28/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 29/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 30/32 : 0 +r7i6n8:29152:29240 [0] NCCL INFO Channel 31/32 : 0 +r7i7n2:57610:57698 [2] NCCL INFO comm 0x14f55c001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55676:55762 [3] NCCL INFO Channel 00/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 01/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 02/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 03/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 04/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 05/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 06/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 07/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 08/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 09/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 10/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 11/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 12/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 13/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 14/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 15/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 16/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 17/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 18/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 19/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 20/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 21/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 22/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 23/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 24/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 25/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 26/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 27/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 28/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 29/32 : 0 
+r7i7n0:55676:55762 [3] NCCL INFO Channel 30/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Channel 31/32 : 0 +r7i7n0:55676:55762 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n0:55676:55762 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55675:55763 [2] NCCL INFO Channel 00/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 01/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 02/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 03/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 04/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 05/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 06/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 07/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 08/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 09/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 10/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 11/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 12/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 13/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 14/32 : 0 
+r7i7n0:55675:55763 [2] NCCL INFO Channel 15/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 16/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 17/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 18/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 19/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 20/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 21/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 22/32 : 0 +r7i4n4:1767:1902 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n0:55675:55763 [2] NCCL INFO Channel 23/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 24/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 25/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 26/32 : 0 +r9i1n6:58312:58396 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n0:55675:55763 [2] NCCL INFO Channel 27/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 28/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 29/32 : 0 +r9i1n6:58314:58402 [2] NCCL INFO comm 0x14cbd0001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29152:29240 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n0:55675:55763 [2] NCCL INFO Channel 30/32 : 0 +r7i7n0:55675:55763 [2] NCCL INFO Channel 31/32 : 0 +r9i1n6:58315:58392 [3] NCCL INFO comm 0x14554c001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29240 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55675:55763 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r9i1n6:58312:58396 [0] NCCL INFO comm 0x14aa2c001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55675:55763 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1908 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i6n8:29154:29242 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1767:1902 [1] NCCL INFO comm 0x149c6c001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29242 [2] NCCL INFO comm 0x148034001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE 
+r7i7n0:55674:55761 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i4n4:1766:1908 [0] NCCL INFO comm 0x14e71c001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55674:55761 [1] NCCL INFO comm 0x14deb0001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68407:68493 [3] NCCL INFO Channel 00/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 01/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 02/32 : 0 +r9i1n4:24818:24909 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i2n6:894:982 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r8i0n3:57935:58021 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68407:68493 [3] NCCL INFO Channel 03/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 04/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 05/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 06/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 07/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 08/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 09/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 10/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 11/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 12/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 13/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 14/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 15/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 16/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 17/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 18/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 19/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 20/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 21/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 22/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 23/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 24/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 25/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 26/32 : 0 
+r7i7n1:68407:68493 [3] NCCL INFO Channel 27/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68407:68493 [3] NCCL INFO Channel 28/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 29/32 : 0 +r9i1n4:24819:24908 [1] NCCL INFO comm 0x14f4cc001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68407:68493 [3] NCCL INFO Channel 30/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Channel 31/32 : 0 +r7i7n1:68407:68493 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n1:68407:68493 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68404:68494 [0] NCCL INFO Channel 00/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 01/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 02/32 : 0 +r7i7n0:55673:55760 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68404:68494 [0] NCCL INFO Channel 03/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 04/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 05/32 : 0 +r7i6n8:29155:29239 [3] NCCL 
INFO Channel 00/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 01/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 02/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 03/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 04/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 06/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 07/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 08/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 09/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 10/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 11/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 12/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 13/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 14/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 15/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 16/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 17/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 18/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 19/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 20/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 21/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 22/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 23/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 24/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 25/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 26/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 27/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 28/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 29/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 30/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Channel 31/32 : 0 +r7i7n1:68404:68494 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i7n1:68404:68494 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:895:980 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i2n6:894:982 [2] NCCL INFO comm 0x152d0c001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:895:980 [3] NCCL INFO comm 0x149e6c001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57932:58023 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n4:24818:24909 [0] NCCL INFO comm 0x151268001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57935:58021 [3] NCCL INFO comm 0x15212c001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57932:58023 [0] NCCL INFO comm 0x152a60001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55673:55760 [0] NCCL INFO comm 0x149258001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29155:29239 [3] NCCL INFO Channel 05/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 06/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 07/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 08/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 09/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 10/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 11/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 12/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 13/32 : 0 
+r7i6n8:29155:29239 [3] NCCL INFO Channel 14/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 15/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 16/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 17/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 18/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 19/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 20/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 21/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 22/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 23/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 24/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 25/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 26/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 27/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 28/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 29/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 30/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Channel 31/32 : 0 +r7i6n8:29155:29239 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i6n8:29155:29239 [3] NCCL INFO 
Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29153:29241 [1] NCCL INFO Channel 00/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 01/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 02/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 03/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 04/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 05/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 06/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 07/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 08/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 09/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 10/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 11/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 12/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 13/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 14/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 15/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 16/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 17/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 18/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 19/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 20/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 21/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 22/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 23/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 24/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 25/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 26/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 27/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 28/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 29/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 30/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Channel 31/32 : 0 +r7i6n8:29153:29241 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 
[7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r7i6n8:29153:29241 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8760:8836 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8851 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i6n8:29152:29240 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i6n8:29152:29240 [0] NCCL INFO comm 0x150478001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55675:55763 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n0:55676:55762 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r6i4n5:37341:37495 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r6i4n5:37339:37473 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n0:55675:55763 [2] NCCL INFO comm 0x1490fc001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55676:55762 [3] NCCL INFO comm 0x151090001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68407:68493 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68407:68493 [3] NCCL INFO comm 0x14d87c001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68404:68494 [0] 
NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i7n1:68404:68494 [0] NCCL INFO comm 0x1455a4001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29153:29241 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i6n8:29153:29241 [1] NCCL INFO comm 0x15448c001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29155:29239 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r7i6n8:29155:29239 [3] NCCL INFO comm 0x14bfb4001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8760:8836 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n7:8762:8851 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r6i4n5:37341:37495 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r6i4n5:37339:37473 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r6i4n5:37339:37473 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37473 [0] NCCL INFO comm 0x147eb8001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel + > number of parameters on model parallel rank 0: 917757952 +r6i4n5:37342:37489 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37340:37494 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37342:37489 [3] NCCL INFO comm 0x152c88001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37340:37494 [1] NCCL INFO comm 0x15440c001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37342:37342 [3] NCCL INFO Launch mode Parallel +r6i4n5:37340:37340 [1] NCCL INFO Launch mode Parallel +r9i1n7:8761:8850 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8841 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8761:8850 [1] NCCL INFO comm 0x154a58001060 rank 1 nranks 2 
cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n7:8763:8841 [3] NCCL INFO comm 0x155098001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE + > number of parameters on model parallel rank 3: 917757952 + > number of parameters on model parallel rank 1: 917757952 + > number of parameters on model parallel rank 1: 917774336 > number of parameters on model parallel rank 3: 917774336 + +r9i1n7:8760:8836 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8836 [0] NCCL INFO comm 0x14df3c001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8762:8851 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8762:8851 [2] NCCL INFO comm 0x145fe4001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE + > number of parameters on model parallel rank 0: 917774336 > number of parameters on model parallel rank 2: 917774336 + +r6i4n5:37341:37495 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37341:37495 [2] NCCL INFO comm 0x14a55c001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37341:37341 [2] NCCL INFO Launch mode Parallel +NCCL version 2.7.8+cuda10.2 + > number of parameters on model parallel rank 2: 917757952 +> learning rate decay style: cosine +NCCL version 2.7.8+cuda10.2 +DeepSpeed is enabled. 
+[2021-05-27 19:49:22,791] [INFO] [logging.py:60:log_dist] [Rank 0] DeepSpeed info: version=0.4.0+unknown, git-hash=unknown, git-branch=unknown +NCCL version 2.7.8+cuda10.2 +r9i1n7:8763:8857 [3] NCCL INFO Channel 00/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 01/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 02/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 03/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 04/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 05/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 06/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 07/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 08/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 09/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 10/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 11/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 12/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 13/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 14/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 15/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 16/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 17/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 18/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 19/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 20/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 21/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 22/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 23/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 24/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 25/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 26/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 27/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 28/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 29/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 30/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Channel 31/32 : 0 +r9i1n7:8763:8857 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n7:8763:8857 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37342:37500 [3] NCCL INFO Channel 00/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 01/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 02/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 03/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 04/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 05/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 06/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 07/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 08/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 09/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 10/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 11/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 12/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 13/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 14/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 15/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 16/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 17/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 18/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 19/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 20/32 : 0 +r6i4n5:37342:37500 [3] NCCL 
INFO Channel 21/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 22/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 23/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 24/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 25/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 26/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 27/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 28/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 29/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 30/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Channel 31/32 : 0 +r6i4n5:37342:37500 [3] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r6i4n5:37342:37500 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +NCCL version 2.7.8+cuda10.2 +r9i1n7:8763:8857 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n7:8763:8857 [3] NCCL INFO comm 0x155074001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37342:37500 [3] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r6i4n5:37342:37500 [3] NCCL INFO 
comm 0x152c94001060 rank 0 nranks 1 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37340:37503 [1] NCCL INFO Channel 00/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 01/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 02/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 03/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 04/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 05/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 06/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 07/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 08/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 09/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 10/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 11/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 12/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 13/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 14/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 15/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 16/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 17/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 18/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 19/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 20/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 21/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 22/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 23/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 24/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 25/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 26/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 27/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 28/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 29/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 30/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Channel 31/32 : 0 +r6i4n5:37340:37503 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r6i4n5:37340:37503 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8761:8859 [1] NCCL INFO Channel 00/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 01/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 02/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 03/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 04/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 05/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 06/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 07/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 08/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 09/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 10/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 11/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 12/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 13/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 14/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 15/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 16/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 17/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 18/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 19/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 20/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 21/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 22/32 : 0 
+r9i1n7:8761:8859 [1] NCCL INFO Channel 23/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 24/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 25/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 26/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 27/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 28/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 29/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 30/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Channel 31/32 : 0 +r9i1n7:8761:8859 [1] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n7:8761:8859 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37340:37503 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r6i4n5:37340:37503 [1] NCCL INFO comm 0x154408001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n7:8761:8859 [1] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n7:8761:8859 [1] NCCL INFO comm 0x154a5c001060 rank 0 nranks 1 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37339:37505 [0] NCCL INFO Channel 00/32 : 0 
+r6i4n5:37339:37505 [0] NCCL INFO Channel 01/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 02/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 03/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 04/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 05/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 06/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 07/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 08/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 09/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 10/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 11/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 12/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 13/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 14/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 15/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 16/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 17/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 18/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 19/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 20/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 21/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 22/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 23/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 24/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 25/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 26/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 27/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 28/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 29/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 30/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Channel 31/32 : 0 +r6i4n5:37339:37505 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] 
-1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r6i4n5:37339:37505 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:8861 [0] NCCL INFO Channel 00/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 01/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 02/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 03/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 04/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 05/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 06/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 07/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 08/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 09/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 10/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 11/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 12/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 13/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 14/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 15/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 16/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 17/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 18/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 19/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 20/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 21/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 22/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 23/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 24/32 : 0 +r9i1n7:8760:8861 [0] 
NCCL INFO Channel 25/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 26/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 27/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 28/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 29/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 30/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Channel 31/32 : 0 +r9i1n7:8760:8861 [0] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n7:8760:8861 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:8861 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n7:8760:8861 [0] NCCL INFO comm 0x14df18001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37505 [0] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r6i4n5:37339:37505 [0] NCCL INFO comm 0x147ec4001060 rank 0 nranks 1 cudaDev 0 busId 1a000 - Init COMPLETE +[2021-05-27 19:49:22,834] [INFO] [engine.py:172:__init__] DeepSpeed Flops Profiler Enabled: False +[2021-05-27 19:49:22,834] [INFO] [engine.py:682:_configure_optimizer] 
Removing param_group that has no 'params' in the client Optimizer +[2021-05-27 19:49:22,834] [INFO] [engine.py:687:_configure_optimizer] Using client Optimizer as basic optimizer +[2021-05-27 19:49:22,834] [INFO] [engine.py:696:_configure_optimizer] DeepSpeed Basic Optimizer = FusedAdam +[2021-05-27 19:49:22,834] [INFO] [logging.py:60:log_dist] [Rank 0] Creating fp16 unfused optimizer with dynamic loss scale +[2021-05-27 19:49:22,834] [INFO] [unfused_optimizer.py:37:__init__] Fused Lamb Legacy : False +r6i4n5:37341:37507 [2] NCCL INFO Channel 00/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 01/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 02/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 03/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 04/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 05/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 06/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 07/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 08/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 09/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 10/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 11/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 12/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 13/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 14/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 15/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 16/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 17/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 18/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 19/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 20/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 21/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 22/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 23/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 24/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 25/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 26/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 27/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO 
Channel 28/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 29/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 30/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Channel 31/32 : 0 +r6i4n5:37341:37507 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/- +r6i4n5:37341:37507 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8762:8863 [2] NCCL INFO Channel 00/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 01/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 02/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 03/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 04/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 05/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 06/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 07/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 08/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 09/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 10/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 11/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 12/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 13/32 : 0 +r9i1n7:8762:8863 [2] 
NCCL INFO Channel 14/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 15/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 16/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 17/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 18/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 19/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 20/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 21/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 22/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 23/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 24/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 25/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 26/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 27/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 28/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 29/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 30/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Channel 31/32 : 0 +r9i1n7:8762:8863 [2] NCCL INFO Trees [0] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [1] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [2] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [3] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [4] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [5] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [6] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [7] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [8] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [9] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [10] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [11] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [12] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [13] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [14] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [15] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [16] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [17] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [18] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [19] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [20] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [21] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [22] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [23] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [24] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [25] -1/-1/-1->0->-1|-1->0->-1/-1/-1 [26] -1/-1/-1->0->-1|-1->0->-1/-1/ +r9i1n7:8762:8863 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37341:37507 
[2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r6i4n5:37341:37507 [2] NCCL INFO comm 0x14a568001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8762:8863 [2] NCCL INFO 32 coll channels, 32 p2p channels, 32 p2p channels per peer +r9i1n7:8762:8863 [2] NCCL INFO comm 0x145fc0001060 rank 0 nranks 1 cudaDev 2 busId 88000 - Init COMPLETE +[2021-05-27 19:49:22,947] [INFO] [logging.py:60:log_dist] [Rank 0] DeepSpeed Final Optimizer = FusedAdam +[2021-05-27 19:49:22,947] [INFO] [engine.py:509:_configure_lr_scheduler] DeepSpeed using client LR scheduler +[2021-05-27 19:49:22,947] [INFO] [logging.py:60:log_dist] [Rank 0] DeepSpeed LR Scheduler = +[2021-05-27 19:49:22,947] [INFO] [logging.py:60:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.999), (0.9, 0.999)] +[2021-05-27 19:49:22,947] [INFO] [config.py:900:print] DeepSpeedEngine configuration: +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] activation_checkpointing_config { + "partition_activations": false, + "contiguous_memory_optimization": false, + "cpu_checkpointing": false, + "number_checkpoints": null, + "synchronize_checkpoint_boundary": false, + "profile": false +} +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] allreduce_always_fp32 ........ False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] amp_enabled .................. False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] amp_params ................... False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] checkpoint_tag_validation_enabled True +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] checkpoint_tag_validation_fail False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] disable_allgather ............ 
False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] dump_state ................... False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] dynamic_loss_scale_args ...... {'init_scale': 1024, 'scale_window': 1000, 'delayed_shift': 2, 'min_scale': 1} +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] eigenvalue_enabled ........... False +[2021-05-27 19:49:22,947] [INFO] [config.py:904:print] eigenvalue_gas_boundary_resolution 1 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_layer_name ........ bert.encoder.layer +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_layer_num ......... 0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_max_iter .......... 100 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_stability ......... 1e-06 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_tol ............... 0.01 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] eigenvalue_verbose ........... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] elasticity_enabled ........... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] flops_profiler_config ........ { + "enabled": false, + "profile_step": 1, + "module_depth": -1, + "top_modules": 1, + "detailed": true, + "output_file": null +} +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] fp16_enabled ................. True +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] fp16_mixed_quantize .......... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] global_rank .................. 0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] gradient_accumulation_steps .. 256 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] gradient_clipping ............ 1.0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] gradient_predivide_factor .... 1.0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] initial_dynamic_scale ........ 
1024 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] loss_scale ................... 0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] memory_breakdown ............. False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] optimizer_legacy_fusion ...... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] optimizer_name ............... None +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] optimizer_params ............. None +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0} +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] pld_enabled .................. False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] pld_params ................... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] prescale_gradients ........... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_change_rate ......... 0.001 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_groups .............. 1 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_offset .............. 1000 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_period .............. 1000 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_rounding ............ 0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_start_bits .......... 16 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_target_bits ......... 8 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_training_enabled .... False +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_type ................ 0 +[2021-05-27 19:49:22,948] [INFO] [config.py:904:print] quantize_verbose ............. False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] scheduler_name ............... 
None +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] scheduler_params ............. None +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] sparse_attention ............. None +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] sparse_gradients_enabled ..... False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] steps_per_print .............. 1 +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] tensorboard_enabled .......... False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] tensorboard_job_name ......... DeepSpeedJobName +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] tensorboard_output_path ...... +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] train_batch_size ............. 1024 +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] train_micro_batch_size_per_gpu 4 +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] use_quantizer_kernel ......... False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] wall_clock_breakdown ......... False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] world_size ................... 1 +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] zero_allow_untested_optimizer False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] zero_config .................. 
{ + "stage": 0, + "contiguous_gradients": false, + "reduce_scatter": true, + "reduce_bucket_size": 5.000000e+08, + "allgather_partitions": true, + "allgather_bucket_size": 5.000000e+08, + "overlap_comm": false, + "load_from_fp32_weights": true, + "elastic_checkpoint": true, + "offload_param": null, + "offload_optimizer": null, + "sub_group_size": 1.000000e+12, + "prefetch_bucket_size": 5.000000e+07, + "param_persistence_threshold": 1.000000e+05, + "max_live_parameters": 1.000000e+09, + "max_reuse_distance": 1.000000e+09, + "gather_fp16_weights_on_model_save": false, + "ignore_unused_parameters": true, + "legacy_stage1": false +} +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] zero_enabled ................. False +[2021-05-27 19:49:22,949] [INFO] [config.py:904:print] zero_optimization_stage ...... 0 +[2021-05-27 19:49:22,949] [INFO] [config.py:906:print] json = { + "train_micro_batch_size_per_gpu": 4, + "gradient_accumulation_steps": 256, + "steps_per_print": 1, + "gradient_clipping": 1.0, + "fp16": { + "enabled": true, + "loss_scale": 0, + "initial_scale_power": 10, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "wall_clock_breakdown": false, + "zero_allow_untested_optimizer": false +} +[2021-05-27 19:49:22,950] [INFO] [engine.py:76:__init__] CONFIG: micro_batches=256 micro_batch_size=4 +r7i6n8:29155:29250 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29155:29250 [3] NCCL INFO Trees [0] -1/-1/-1->31->30|30->31->-1/-1/-1 [1] 28/-1/-1->31->30|30->31->28/-1/-1 [2] -1/-1/-1->31->30|30->31->-1/-1/-1 [3] 28/-1/-1->31->30|30->31->28/-1/-1 +r7i6n8:29155:29250 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55673:55768 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55673:55768 [0] NCCL INFO Trees [0] 33/16/48->32->0|0->32->33/16/48 [1] 33/-1/-1->32->35|35->32->33/-1/-1 [2] 33/-1/-1->32->28|28->32->33/-1/-1 [3] 33/-1/-1->32->35|35->32->33/-1/-1 +r7i7n0:55673:55768 [0] NCCL INFO 
Setting affinity for GPU 0 to 0fffff +r7i6n8:29153:29248 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29154:29247 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68404:68502 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68404:68502 [0] NCCL INFO Trees [0] 37/-1/-1->36->40|40->36->37/-1/-1 [1] 37/-1/-1->36->39|39->36->37/-1/-1 [2] 37/20/52->36->4|4->36->37/20/52 [3] 37/-1/-1->36->39|39->36->37/-1/-1 +r7i6n8:29152:29249 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29153:29248 [1] NCCL INFO Trees [0] 30/-1/-1->29->28|28->29->30/-1/-1 [1] -1/-1/-1->29->28|28->29->-1/-1/-1 [2] 30/-1/-1->29->28|28->29->30/-1/-1 [3] -1/-1/-1->29->28|28->29->-1/-1/-1 +r7i6n8:29154:29247 [2] NCCL INFO Trees [0] 31/-1/-1->30->29|29->30->31/-1/-1 [1] 31/-1/-1->30->26|26->30->31/-1/-1 [2] 31/-1/-1->30->29|29->30->31/-1/-1 [3] 31/26/34->30->22|22->30->31/26/34 +r7i6n8:29152:29249 [0] NCCL INFO Trees [0] 29/-1/-1->28->24|24->28->29/-1/-1 [1] 29/-1/-1->28->31|31->28->29/-1/-1 [2] 29/24/32->28->20|20->28->29/24/32 [3] 29/-1/-1->28->31|31->28->29/-1/-1 +r7i6n8:29154:29247 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29153:29248 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29152:29249 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79997:80091 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79997:80091 [3] NCCL INFO Trees [0] -1/-1/-1->27->26|26->27->-1/-1/-1 [1] 24/-1/-1->27->26|26->27->24/-1/-1 [2] -1/-1/-1->27->26|26->27->-1/-1/-1 [3] 24/-1/-1->27->26|26->27->24/-1/-1 +r7i4n5:46160:46254 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46161:46256 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46159:46255 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46161:46256 [3] NCCL INFO Trees [0] -1/-1/-1->23->22|22->23->-1/-1/-1 [1] 20/-1/-1->23->22|22->23->20/-1/-1 [2] -1/-1/-1->23->22|22->23->-1/-1/-1 [3] 
20/-1/-1->23->22|22->23->20/-1/-1 +r7i4n5:46160:46254 [2] NCCL INFO Trees [0] 23/-1/-1->22->21|21->22->23/-1/-1 [1] 23/-1/-1->22->26|26->22->23/-1/-1 [2] 23/-1/-1->22->21|21->22->23/-1/-1 [3] 23/14/30->22->38|38->22->23/14/30 +r7i4n5:46159:46255 [1] NCCL INFO Trees [0] 22/-1/-1->21->20|20->21->22/-1/-1 [1] -1/-1/-1->21->20|20->21->-1/-1/-1 [2] 22/-1/-1->21->20|20->21->22/-1/-1 [3] -1/-1/-1->21->20|20->21->-1/-1/-1 +r7i7n0:55674:55769 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55674:55769 [1] NCCL INFO Trees [0] 34/-1/-1->33->32|32->33->34/-1/-1 [1] -1/-1/-1->33->32|32->33->-1/-1/-1 [2] 34/-1/-1->33->32|32->33->34/-1/-1 [3] -1/-1/-1->33->32|32->33->-1/-1/-1 +r7i7n0:55675:55770 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55674:55769 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55676:55771 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55675:55770 [2] NCCL INFO Trees [0] 35/-1/-1->34->33|33->34->35/-1/-1 [1] 35/18/50->34->2|2->34->35/18/50 [2] 35/-1/-1->34->33|33->34->35/-1/-1 [3] 35/-1/-1->34->30|30->34->35/-1/-1 +r7i7n0:55676:55771 [3] NCCL INFO Trees [0] -1/-1/-1->35->34|34->35->-1/-1/-1 [1] 32/-1/-1->35->34|34->35->32/-1/-1 [2] -1/-1/-1->35->34|34->35->-1/-1/-1 [3] 32/-1/-1->35->34|34->35->32/-1/-1 +r7i7n0:55675:55770 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55676:55771 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1769:1913 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1769:1913 [3] NCCL INFO Trees [0] -1/-1/-1->19->18|18->19->-1/-1/-1 [1] 16/-1/-1->19->18|18->19->16/-1/-1 [2] -1/-1/-1->19->18|18->19->-1/-1/-1 [3] 16/-1/-1->19->18|18->19->16/-1/-1 +r7i4n4:1769:1913 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38601:38694 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57608:57705 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57609:57704 [1] NCCL INFO threadThresholds 
8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57610:57706 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68404:68502 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68405:68500 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68405:68500 [1] NCCL INFO Trees [0] 38/-1/-1->37->36|36->37->38/-1/-1 [1] -1/-1/-1->37->36|36->37->-1/-1/-1 [2] 38/-1/-1->37->36|36->37->38/-1/-1 [3] -1/-1/-1->37->36|36->37->-1/-1/-1 +r7i7n1:68405:68500 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68406:68501 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68407:68499 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57932:58028 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57933:58029 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57934:58031 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57932:58028 [0] NCCL INFO Trees [0] 45/-1/-1->44->40|40->44->45/-1/-1 [1] 45/-1/-1->44->47|47->44->45/-1/-1 [2] 45/40/48->44->52|52->44->45/40/48 [3] 45/-1/-1->44->47|47->44->45/-1/-1 +r8i0n3:57933:58029 [1] NCCL INFO Trees [0] 46/-1/-1->45->44|44->45->46/-1/-1 [1] -1/-1/-1->45->44|44->45->-1/-1/-1 [2] 46/-1/-1->45->44|44->45->46/-1/-1 [3] -1/-1/-1->45->44|44->45->-1/-1/-1 +r8i0n3:57934:58031 [2] NCCL INFO Trees [0] 47/-1/-1->46->45|45->46->47/-1/-1 [1] 47/-1/-1->46->42|42->46->47/-1/-1 [2] 47/-1/-1->46->45|45->46->47/-1/-1 [3] 47/42/50->46->54|54->46->47/42/50 +r7i7n1:68406:68501 [2] NCCL INFO Trees [0] 39/-1/-1->38->37|37->38->39/-1/-1 [1] 39/-1/-1->38->42|42->38->39/-1/-1 [2] 39/-1/-1->38->37|37->38->39/-1/-1 [3] 39/22/54->38->6|6->38->39/22/54 +r7i7n1:68407:68499 [3] NCCL INFO Trees [0] -1/-1/-1->39->38|38->39->-1/-1/-1 [1] 36/-1/-1->39->38|38->39->36/-1/-1 [2] -1/-1/-1->39->38|38->39->-1/-1/-1 [3] 36/-1/-1->39->38|38->39->36/-1/-1 +r7i7n1:68406:68501 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n1:68407:68499 [3] NCCL INFO Setting affinity for GPU 
3 to ff,fff00000 +r7i5n3:79997:80091 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79994:80089 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79995:80090 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79996:80092 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79994:80089 [0] NCCL INFO Trees [0] 25/20/28->24->16|16->24->25/20/28 [1] 25/-1/-1->24->27|27->24->25/-1/-1 [2] 25/-1/-1->24->28|28->24->25/-1/-1 [3] 25/-1/-1->24->27|27->24->25/-1/-1 +r7i2n6:894:989 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:895:990 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:894:989 [2] NCCL INFO Trees [0] 11/-1/-1->10->9|9->10->11/-1/-1 [1] 11/6/14->10->18|18->10->11/6/14 [2] 11/-1/-1->10->9|9->10->11/-1/-1 [3] 11/-1/-1->10->14|14->10->11/-1/-1 +r7i2n6:895:990 [3] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 8/-1/-1->11->10|10->11->8/-1/-1 [2] -1/-1/-1->11->10|10->11->-1/-1/-1 [3] 8/-1/-1->11->10|10->11->8/-1/-1 +r7i5n3:79995:80090 [1] NCCL INFO Trees [0] 26/-1/-1->25->24|24->25->26/-1/-1 [1] -1/-1/-1->25->24|24->25->-1/-1/-1 [2] 26/-1/-1->25->24|24->25->26/-1/-1 [3] -1/-1/-1->25->24|24->25->-1/-1/-1 +r7i5n3:79996:80092 [2] NCCL INFO Trees [0] 27/-1/-1->26->25|25->26->27/-1/-1 [1] 27/22/30->26->18|18->26->27/22/30 [2] 27/-1/-1->26->25|25->26->27/-1/-1 [3] 27/-1/-1->26->30|30->26->27/-1/-1 +r7i5n3:79994:80089 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79995:80090 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79996:80092 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24819:24914 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24818:24916 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24818:24916 [0] NCCL INFO Trees [0] 49/40/56->48->32|32->48->49/40/56 [1] 49/-1/-1->48->51|51->48->49/-1/-1 [2] 49/-1/-1->48->44|44->48->49/-1/-1 [3] 49/-1/-1->48->51|51->48->49/-1/-1 
+r9i1n4:24819:24914 [1] NCCL INFO Trees [0] 50/-1/-1->49->48|48->49->50/-1/-1 [1] -1/-1/-1->49->48|48->49->-1/-1/-1 [2] 50/-1/-1->49->48|48->49->50/-1/-1 [3] -1/-1/-1->49->48|48->49->-1/-1/-1 +r7i1n3:4943:5039 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4944:5037 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4943:5039 [2] NCCL INFO Trees [0] 7/-1/-1->6->5|5->6->7/-1/-1 [1] 7/-1/-1->6->10|10->6->7/-1/-1 [2] 7/-1/-1->6->5|5->6->7/-1/-1 [3] 7/38/-1->6->-1|-1->6->7/38/-1 +r7i1n3:4944:5037 [3] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 4/-1/-1->7->6|6->7->4/-1/-1 [2] -1/-1/-1->7->6|6->7->-1/-1/-1 [3] 4/-1/-1->7->6|6->7->4/-1/-1 +r7i4n5:46161:46256 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46160:46254 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46159:46255 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46158:46253 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46158:46253 [0] NCCL INFO Trees [0] 21/-1/-1->20->24|24->20->21/-1/-1 [1] 21/-1/-1->20->23|23->20->21/-1/-1 [2] 21/12/28->20->36|36->20->21/12/28 [3] 21/-1/-1->20->23|23->20->21/-1/-1 +r7i4n5:46158:46253 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37342:37514 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37341:37516 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37341:37516 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] 3/34/-1->2->-1|-1->2->3/34/-1 [2] 3/-1/-1->2->1|1->2->3/-1/-1 [3] 3/-1/-1->2->62|62->2->3/-1/-1 +r6i4n5:37339:37513 [0] NCCL INFO Channel 00/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r6i4n5:37342:37514 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->2|2->3->0/-1/-1 [2] -1/-1/-1->3->2|2->3->-1/-1/-1 [3] 0/-1/-1->3->2|2->3->0/-1/-1 +r9i1n7:8762:8871 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8763:8868 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 
+r9i1n7:8761:8869 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8762:8871 [2] NCCL INFO Trees [0] 63/-1/-1->62->61|61->62->63/-1/-1 [1] 63/-1/-1->62->58|58->62->63/-1/-1 [2] 63/-1/-1->62->61|61->62->63/-1/-1 [3] 63/58/2->62->54|54->62->63/58/2 +r9i1n5:40815:40911 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40816:40910 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40813:40909 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40814:40908 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40816:40910 [3] NCCL INFO Trees [0] -1/-1/-1->55->54|54->55->-1/-1/-1 [1] 52/-1/-1->55->54|54->55->52/-1/-1 [2] -1/-1/-1->55->54|54->55->-1/-1/-1 [3] 52/-1/-1->55->54|54->55->52/-1/-1 +r9i1n5:40815:40911 [2] NCCL INFO Trees [0] 55/-1/-1->54->53|53->54->55/-1/-1 [1] 55/-1/-1->54->58|58->54->55/-1/-1 [2] 55/-1/-1->54->53|53->54->55/-1/-1 [3] 55/46/62->54->38|38->54->55/46/62 +r9i1n5:40813:40909 [0] NCCL INFO Trees [0] 53/-1/-1->52->56|56->52->53/-1/-1 [1] 53/-1/-1->52->55|55->52->53/-1/-1 [2] 53/44/60->52->36|36->52->53/44/60 [3] 53/-1/-1->52->55|55->52->53/-1/-1 +r9i1n6:58312:58410 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58313:58407 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58315:58409 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58314:58408 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58312:58410 [0] NCCL INFO Trees [0] 57/52/60->56->48|48->56->57/52/60 [1] 57/-1/-1->56->59|59->56->57/-1/-1 [2] 57/-1/-1->56->60|60->56->57/-1/-1 [3] 57/-1/-1->56->59|59->56->57/-1/-1 +r9i1n6:58313:58407 [1] NCCL INFO Trees [0] 58/-1/-1->57->56|56->57->58/-1/-1 [1] -1/-1/-1->57->56|56->57->-1/-1/-1 [2] 58/-1/-1->57->56|56->57->58/-1/-1 [3] -1/-1/-1->57->56|56->57->-1/-1/-1 +r9i1n6:58315:58409 [3] NCCL INFO Trees [0] -1/-1/-1->59->58|58->59->-1/-1/-1 [1] 56/-1/-1->59->58|58->59->56/-1/-1 [2] 
-1/-1/-1->59->58|58->59->-1/-1/-1 [3] 56/-1/-1->59->58|58->59->56/-1/-1 +r7i4n4:1768:1915 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1766:1916 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1767:1914 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1768:1915 [2] NCCL INFO Trees [0] 19/-1/-1->18->17|17->18->19/-1/-1 [1] 19/10/26->18->34|34->18->19/10/26 [2] 19/-1/-1->18->17|17->18->19/-1/-1 [3] 19/-1/-1->18->14|14->18->19/-1/-1 +r7i4n4:1766:1916 [0] NCCL INFO Trees [0] 17/8/24->16->32|32->16->17/8/24 [1] 17/-1/-1->16->19|19->16->17/-1/-1 [2] 17/-1/-1->16->12|12->16->17/-1/-1 [3] 17/-1/-1->16->19|19->16->17/-1/-1 +r7i4n4:1767:1914 [1] NCCL INFO Trees [0] 18/-1/-1->17->16|16->17->18/-1/-1 [1] -1/-1/-1->17->16|16->17->-1/-1/-1 [2] 18/-1/-1->17->16|16->17->18/-1/-1 [3] -1/-1/-1->17->16|16->17->-1/-1/-1 +r7i4n4:1768:1915 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1916 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38601:38694 [3] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 12/-1/-1->15->14|14->15->12/-1/-1 [2] -1/-1/-1->15->14|14->15->-1/-1/-1 [3] 12/-1/-1->15->14|14->15->12/-1/-1 +r7i3n0:38601:38694 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1767:1914 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38600:38696 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38600:38696 [2] NCCL INFO Trees [0] 15/-1/-1->14->13|13->14->15/-1/-1 [1] 15/-1/-1->14->10|10->14->15/-1/-1 [2] 15/-1/-1->14->13|13->14->15/-1/-1 [3] 15/10/18->14->22|22->14->15/10/18 +r7i3n0:38600:38696 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38598:38695 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38599:38693 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38599:38693 [1] NCCL INFO Trees [0] 14/-1/-1->13->12|12->13->14/-1/-1 [1] -1/-1/-1->13->12|12->13->-1/-1/-1 [2] 
14/-1/-1->13->12|12->13->14/-1/-1 [3] -1/-1/-1->13->12|12->13->-1/-1/-1 +r7i3n0:38598:38695 [0] NCCL INFO Trees [0] 13/-1/-1->12->8|8->12->13/-1/-1 [1] 13/-1/-1->12->15|15->12->13/-1/-1 [2] 13/8/16->12->20|20->12->13/8/16 [3] 13/-1/-1->12->15|15->12->13/-1/-1 +r7i7n2:57611:57703 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57608:57705 [0] NCCL INFO Trees [0] 41/36/44->40->48|48->40->41/36/44 [1] 41/-1/-1->40->43|43->40->41/-1/-1 [2] 41/-1/-1->40->44|44->40->41/-1/-1 [3] 41/-1/-1->40->43|43->40->41/-1/-1 +r7i7n2:57609:57704 [1] NCCL INFO Trees [0] 42/-1/-1->41->40|40->41->42/-1/-1 [1] -1/-1/-1->41->40|40->41->-1/-1/-1 [2] 42/-1/-1->41->40|40->41->42/-1/-1 [3] -1/-1/-1->41->40|40->41->-1/-1/-1 +r7i7n2:57610:57706 [2] NCCL INFO Trees [0] 43/-1/-1->42->41|41->42->43/-1/-1 [1] 43/38/46->42->50|50->42->43/38/46 [2] 43/-1/-1->42->41|41->42->43/-1/-1 [3] 43/-1/-1->42->46|46->42->43/-1/-1 +r7i7n2:57611:57703 [3] NCCL INFO Trees [0] -1/-1/-1->43->42|42->43->-1/-1/-1 [1] 40/-1/-1->43->42|42->43->40/-1/-1 [2] -1/-1/-1->43->42|42->43->-1/-1/-1 [3] 40/-1/-1->43->42|42->43->40/-1/-1 +r7i7n2:57608:57705 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38599:38693 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38598:38695 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57609:57704 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57610:57706 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57611:57703 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57932:58028 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57933:58029 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57934:58031 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57935:58030 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57935:58030 [3] NCCL INFO Trees [0] -1/-1/-1->47->46|46->47->-1/-1/-1 [1] 44/-1/-1->47->46|46->47->44/-1/-1 [2] -1/-1/-1->47->46|46->47->-1/-1/-1 [3] 
44/-1/-1->47->46|46->47->44/-1/-1 +r8i0n3:57935:58030 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:895:990 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:894:989 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:892:988 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:893:987 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:892:988 [0] NCCL INFO Trees [0] 9/4/12->8->16|16->8->9/4/12 [1] 9/-1/-1->8->11|11->8->9/-1/-1 [2] 9/-1/-1->8->12|12->8->9/-1/-1 [3] 9/-1/-1->8->11|11->8->9/-1/-1 +r7i2n6:893:987 [1] NCCL INFO Trees [0] 10/-1/-1->9->8|8->9->10/-1/-1 [1] -1/-1/-1->9->8|8->9->-1/-1/-1 [2] 10/-1/-1->9->8|8->9->10/-1/-1 [3] -1/-1/-1->9->8|8->9->-1/-1/-1 +r7i2n6:892:988 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:893:987 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24818:24916 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24819:24914 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24820:24917 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24820:24917 [2] NCCL INFO Trees [0] 51/-1/-1->50->49|49->50->51/-1/-1 [1] 51/42/58->50->34|34->50->51/42/58 [2] 51/-1/-1->50->49|49->50->51/-1/-1 [3] 51/-1/-1->50->46|46->50->51/-1/-1 +r9i1n4:24821:24915 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24820:24917 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24821:24915 [3] NCCL INFO Trees [0] -1/-1/-1->51->50|50->51->-1/-1/-1 [1] 48/-1/-1->51->50|50->51->48/-1/-1 [2] -1/-1/-1->51->50|50->51->-1/-1/-1 [3] 48/-1/-1->51->50|50->51->48/-1/-1 +r9i1n4:24821:24915 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4943:5039 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4944:5037 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4941:5040 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4941:5040 [0] NCCL INFO Trees [0] 
5/-1/-1->4->8|8->4->5/-1/-1 [1] 5/-1/-1->4->7|7->4->5/-1/-1 [2] 5/36/-1->4->-1|-1->4->5/36/-1 [3] 5/-1/-1->4->7|7->4->5/-1/-1 +r7i1n3:4942:5038 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4941:5040 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5038 [1] NCCL INFO Trees [0] 6/-1/-1->5->4|4->5->6/-1/-1 [1] -1/-1/-1->5->4|4->5->-1/-1/-1 [2] 6/-1/-1->5->4|4->5->6/-1/-1 [3] -1/-1/-1->5->4|4->5->-1/-1/-1 +r7i1n3:4942:5038 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37341:37516 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37340:37515 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37342:37514 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37339:37513 [0] NCCL INFO Channel 01/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r6i4n5:37340:37515 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] -1/-1/-1->1->0|0->1->-1/-1/-1 [2] 2/-1/-1->1->0|0->1->2/-1/-1 [3] -1/-1/-1->1->0|0->1->-1/-1/-1 +r6i4n5:37339:37513 [0] NCCL INFO Channel 02/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r6i4n5:37340:37515 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37339:37513 [0] NCCL INFO Channel 03/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r6i4n5:37339:37513 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37339:37513 [0] NCCL INFO Trees [0] 1/32/-1->0->-1|-1->0->1/32/-1 [1] 1/-1/-1->0->3|3->0->1/-1/-1 [2] 1/-1/-1->0->60|60->0->1/-1/-1 [3] 1/-1/-1->0->3|3->0->1/-1/-1 +r9i1n7:8762:8871 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8763:8868 [3] NCCL INFO Trees [0] -1/-1/-1->63->62|62->63->-1/-1/-1 [1] 60/-1/-1->63->62|62->63->60/-1/-1 [2] -1/-1/-1->63->62|62->63->-1/-1/-1 [3] 60/-1/-1->63->62|62->63->60/-1/-1 +r9i1n7:8761:8869 [1] NCCL INFO Trees [0] 62/-1/-1->61->60|60->61->62/-1/-1 [1] -1/-1/-1->61->60|60->61->-1/-1/-1 [2] 62/-1/-1->61->60|60->61->62/-1/-1 [3] -1/-1/-1->61->60|60->61->-1/-1/-1 
+r9i1n7:8763:8868 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8761:8869 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8760:8870 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37339:37513 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:8870 [0] NCCL INFO Trees [0] 61/-1/-1->60->56|56->60->61/-1/-1 [1] 61/-1/-1->60->63|63->60->61/-1/-1 [2] 61/56/0->60->52|52->60->61/56/0 [3] 61/-1/-1->60->63|63->60->61/-1/-1 +r9i1n7:8760:8870 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29153:29248 [1] NCCL INFO Channel 00 : 29[1c000] -> 30[88000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Trees [0] 54/-1/-1->53->52|52->53->54/-1/-1 [1] -1/-1/-1->53->52|52->53->-1/-1/-1 [2] 54/-1/-1->53->52|52->53->54/-1/-1 [3] -1/-1/-1->53->52|52->53->-1/-1/-1 +r9i1n5:40816:40910 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40813:40909 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40815:40911 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40814:40908 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58314:58408 [2] NCCL INFO Trees [0] 59/-1/-1->58->57|57->58->59/-1/-1 [1] 59/54/62->58->50|50->58->59/54/62 [2] 59/-1/-1->58->57|57->58->59/-1/-1 [3] 59/-1/-1->58->62|62->58->59/-1/-1 +r9i1n6:58312:58410 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58313:58407 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58315:58409 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58314:58408 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79995:80090 [1] NCCL INFO Channel 00 : 25[1c000] -> 26[88000] via P2P/IPC +r7i2n6:894:989 [2] NCCL INFO Channel 00 : 10[88000] -> 11[8a000] via P2P/IPC +r7i4n5:46159:46255 [1] NCCL INFO Channel 00 : 21[1c000] -> 22[88000] via P2P/IPC +r9i1n7:8761:8869 [1] NCCL INFO Channel 00 : 61[1c000] -> 62[88000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[88000] via 
P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 00 : 26[88000] -> 27[8a000] via P2P/IPC +r7i7n1:68405:68500 [1] NCCL INFO Channel 00 : 37[1c000] -> 38[88000] via P2P/IPC +r7i7n0:55674:55769 [1] NCCL INFO Channel 00 : 33[1c000] -> 34[88000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[88000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 00 : 22[88000] -> 23[8a000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 00 : 62[88000] -> 63[8a000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 00 : 14[88000] -> 15[8a000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Channel 00 : 53[1c000] -> 54[88000] via P2P/IPC +r7i7n2:57609:57704 [1] NCCL INFO Channel 00 : 41[1c000] -> 42[88000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 00 : 38[88000] -> 39[8a000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 00 : 34[88000] -> 35[8a000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[88000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 00 : 17[1c000] -> 18[88000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO Channel 00 : 45[1c000] -> 46[88000] via P2P/IPC +r9i1n6:58313:58407 [1] NCCL INFO Channel 00 : 57[1c000] -> 58[88000] via P2P/IPC +r9i1n4:24819:24914 [1] NCCL INFO Channel 00 : 49[1c000] -> 50[88000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 00 : 54[88000] -> 55[8a000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 00 : 42[88000] -> 43[8a000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 00 : 6[88000] -> 7[8a000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 00 : 18[88000] -> 19[8a000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 00 : 46[88000] -> 47[8a000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 00 : 58[88000] -> 59[8a000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 00 : 
50[88000] -> 51[8a000] via P2P/IPC +r7i2n6:894:989 [2] NCCL INFO Channel 00 : 10[88000] -> 9[1c000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 00 : 62[88000] -> 61[1c000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 00 : 26[88000] -> 25[1c000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 00 : 14[88000] -> 13[1c000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 00 : 38[88000] -> 37[1c000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 00 : 22[88000] -> 21[1c000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 00 : 34[88000] -> 33[1c000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 00 : 54[88000] -> 53[1c000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 00 : 42[88000] -> 41[1c000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 00 : 46[88000] -> 45[1c000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 00 : 6[88000] -> 5[1c000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 00 : 18[88000] -> 17[1c000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 00 : 50[88000] -> 49[1c000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 00 : 58[88000] -> 57[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 00 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58028 [0] NCCL INFO Channel 00 : 43[8a000] -> 44[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 
+r9i1n5:40813:40909 [0] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r7i6n8:29155:29250 [3] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r7i4n4:1769:1913 [3] NCCL INFO Channel 00 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r7i4n5:46161:46256 [3] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r7i5n3:79997:80091 [3] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r6i4n5:37339:37513 [0] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r7i3n0:38601:38694 [3] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r7i7n0:55676:55771 [3] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r7i7n1:68407:68499 [3] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r9i1n5:40816:40910 [3] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r9i1n4:24821:24915 [3] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r9i1n7:8763:8868 [3] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r7i7n2:57611:57703 [3] NCCL INFO Channel 00 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r8i0n3:57935:58030 [3] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r9i1n6:58315:58409 [3] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 +r7i2n6:895:990 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r7i1n3:4944:5037 
[3] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r6i4n5:37342:37514 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [send] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 00 : 60[1a000] -> 61[1c000] via P2P/IPC +r6i4n5:37339:37513 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:8869 [1] NCCL INFO Channel 00 : 61[1c000] -> 60[1a000] via P2P/IPC +r9i1n7:8763:8868 [3] NCCL INFO Channel 00 : 63[8a000] -> 62[88000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 01 : 62[88000] -> 61[1c000] via P2P/IPC +r9i1n7:8763:8868 [3] NCCL INFO Channel 01 : 63[8a000] -> 62[88000] via P2P/IPC +r6i4n5:37339:37513 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 00 : 30[88000] -> 31[8a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r7i3n0:38598:38695 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 00 : 30[88000] -> 29[1c000] via P2P/IPC +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO Channel 00 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1c000] via P2P/IPC +r6i4n5:37342:37514 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24914 [1] NCCL INFO Channel 00 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 00 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i2n6:895:990 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[88000] via P2P/IPC +r6i4n5:37342:37514 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 01 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 00 : 52[1a000] 
-> 53[1c000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 00 : 9[1c000] -> 8[1a000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 00 : 44[1a000] -> 45[1c000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 16[1a000] -> 17[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 32[1a000] -> 33[1c000] via P2P/IPC +r7i2n6:895:990 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[88000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Channel 00 : 53[1c000] -> 52[1a000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 00 : 51[8a000] -> 50[88000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[88000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO Channel 00 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 00 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 00 : 5[1c000] -> 4[1a000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 01 : 51[8a000] -> 50[88000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 50[88000] -> 49[1c000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[88000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i1n3:4944:5037 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[88000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 00 : 36[1a000] -> 37[1c000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [receive] via NET/IB/2 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 10[88000] -> 9[1c000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[88000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 01 : 6[88000] -> 5[1c000] via P2P/IPC +r7i7n1:68405:68500 [1] NCCL INFO Channel 00 : 37[1c000] -> 36[1a000] via P2P/IPC 
+r9i1n5:40813:40909 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [send] via NET/IB/3 +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 40[1a000] -> 41[1c000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Channel 01 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i7n0:55674:55769 [1] NCCL INFO Channel 00 : 33[1c000] -> 32[1a000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 00 : 20[1a000] -> 21[1c000] via P2P/IPC +r7i7n0:55676:55771 [3] NCCL INFO Channel 00 : 35[8a000] -> 34[88000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 00 : 39[8a000] -> 38[88000] via P2P/IPC +r9i1n5:40816:40910 [3] NCCL INFO Channel 00 : 55[8a000] -> 54[88000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r7i5n3:79997:80091 [3] NCCL INFO Channel 00 : 27[8a000] -> 26[88000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r8i0n3:57935:58030 [3] NCCL INFO Channel 00 : 47[8a000] -> 46[88000] via P2P/IPC +r7i7n0:55676:55771 [3] NCCL INFO Channel 01 : 35[8a000] -> 34[88000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO Channel 00 : 43[8a000] -> 42[88000] via P2P/IPC +r7i6n8:29155:29250 [3] NCCL INFO Channel 00 : 31[8a000] -> 30[88000] via P2P/IPC +r7i7n2:57609:57704 [1] NCCL INFO Channel 00 : 41[1c000] -> 40[1a000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 00 : 59[8a000] -> 58[88000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 00 : 19[8a000] -> 18[88000] via P2P/IPC +r9i1n6:58313:58407 [1] NCCL INFO Channel 00 : 57[1c000] -> 56[1a000] via 
P2P/IPC +r9i1n5:40816:40910 [3] NCCL INFO Channel 01 : 55[8a000] -> 54[88000] via P2P/IPC +r7i4n5:46161:46256 [3] NCCL INFO Channel 00 : 23[8a000] -> 22[88000] via P2P/IPC +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [receive] via NET/IB/2 +r7i5n3:79995:80090 [1] NCCL INFO Channel 00 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i6n8:29155:29250 [3] NCCL INFO Channel 01 : 31[8a000] -> 30[88000] via P2P/IPC +r7i4n5:46159:46255 [1] NCCL INFO Channel 00 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [send] via NET/IB/2 +r7i5n3:79997:80091 [3] NCCL INFO Channel 01 : 27[8a000] -> 26[88000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 01 : 39[8a000] -> 38[88000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 01 : 38[88000] -> 37[1c000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 01 : 19[8a000] -> 18[88000] via P2P/IPC +r8i0n3:57935:58030 [3] NCCL INFO Channel 01 : 47[8a000] -> 46[88000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 01 : 46[88000] -> 45[1c000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 42[88000] -> 41[1c000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO Channel 01 : 43[8a000] -> 42[88000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 01 : 14[88000] -> 13[1c000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 58[88000] -> 57[1c000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 01 : 59[8a000] -> 58[88000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r7i4n5:46161:46256 [3] NCCL INFO Channel 01 : 23[8a000] -> 22[88000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 01 : 22[88000] -> 21[1c000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n7:8760:8870 [0] NCCL INFO Channel 00 : 60[1a000] -> 56[1a000] 
[send] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58028 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [receive] via NET/IB/3 +r9i1n7:8761:8869 [1] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] [send] via NET/IB/3 +r7i6n8:29153:29248 [1] NCCL INFO Channel 00 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8871 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [receive] via NET/IB/3 +r9i1n4:24819:24914 [1] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [send] via NET/IB/3 +r7i7n0:55674:55769 [1] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r8i0n3:57933:58029 [1] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [send] via NET/IB/3 +r7i7n1:68405:68500 [1] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r7i1n3:4943:5039 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [receive] via NET/IB/2 +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i7n1:68406:68501 [2] NCCL INFO Channel 01 : 38[88000] -> 42[88000] [send] via NET/IB/2 +r7i5n3:79995:80090 [1] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [send] via NET/IB/2 
+r7i7n2:57609:57704 [1] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 38[88000] -> 42[88000] [receive] via NET/IB/2 +r7i3n0:38600:38696 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r9i1n6:58313:58407 [1] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] [receive] via NET/IB/2 +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [send] via NET/IB/3 +r7i4n5:46159:46255 [1] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r7i3n0:38598:38695 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 56[1a000] -> 48[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 60[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i6n8:29153:29248 [1] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [send] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [receive] via NET/IB/2 +r7i7n1:68404:68502 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40911 [2] NCCL INFO Channel 01 : 54[88000] -> 53[1c000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i3n0:38600:38696 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r7i3n0:38600:38696 [2] NCCL INFO Channel 01 : 14[88000] -> 15[8a000] via P2P/IPC 
+r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 26[88000] -> 25[1c000] via P2P/IPC +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 18[88000] -> 17[1c000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 34[88000] -> 33[1c000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 56[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [send] via NET/IB/3 +r8i0n3:57932:58028 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [receive] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 00 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40911 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] [send] via NET/IB/2 +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [receive] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [receive] via NET/IB/2 +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [receive] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [send] via NET/IB/2 +r7i6n8:29154:29247 [2] NCCL INFO Channel 01 : 30[88000] -> 29[1c000] via P2P/IPC +r6i4n5:37339:37513 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [send] via NET/IB/3 +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 
: 32[1a000] -> 48[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5039 [2] NCCL INFO Channel 01 : 6[88000] -> 7[8a000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [receive] via NET/IB/2 +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [receive] via NET/IB/2 +r7i4n5:46160:46254 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [send] via NET/IB/2 +r7i6n8:29154:29247 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [send] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [send] via NET/IB/3 +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [send] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [receive] via NET/IB/2 +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 10[88000] -> 11[8a000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [send] via NET/IB/3 +r7i7n1:68406:68501 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [receive] via NET/IB/2 +r6i4n5:37339:37513 [0] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] 
[receive] via NET/IB/3 +r6i4n5:37339:37513 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [send] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [send] via NET/IB/2 +r9i1n7:8761:8869 [1] NCCL INFO Channel 01 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [send] via NET/IB/2 +r6i4n5:37342:37514 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 01 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [send] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [send] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [receive] via NET/IB/2 +r7i7n0:55673:55768 [0] NCCL INFO Channel 00 : 32[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] [receive] via NET/IB/2 +r7i1n3:4941:5040 [0] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 01 : 22[88000] -> 23[8a000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 01 : 30[88000] -> 31[8a000] via P2P/IPC +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [send] via NET/IB/2 +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [send] via NET/IB/2 +r7i5n3:79994:80089 [0] NCCL INFO Channel 00 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 01 : 4[1a000] -> 7[8a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 01 : 42[88000] -> 
46[88000] [receive] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO Channel 01 : 12[1a000] -> 15[8a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO Channel 01 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 01 : 9[1c000] -> 8[1a000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r7i3n0:38601:38694 [3] NCCL INFO Channel 01 : 15[8a000] -> 12[1a000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [receive] via NET/IB/2 +r6i4n5:37339:37513 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 01 : 54[88000] -> 55[8a000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [send] via NET/IB/2 +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [receive] via NET/IB/2 +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 26[88000] -> 27[8a000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [receive] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 34[88000] -> 35[8a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 02 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [receive] via NET/IB/2 +r7i2n6:894:989 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r7i2n6:892:988 [0] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [receive] via NET/IB/3 +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 18[88000] -> 19[8a000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 01 : 8[1a000] -> 11[8a000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 01 : 52[1a000] -> 55[8a000] via P2P/IPC +r9i1n4:24818:24916 [0] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 
+r6i4n5:37341:37516 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 01 : 48[1a000] -> 51[8a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 01 : 20[1a000] -> 23[8a000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1916 [0] NCCL INFO Channel 01 : 16[1a000] -> 19[8a000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 01 : 36[1a000] -> 39[8a000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 01 : 5[1c000] -> 4[1a000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 01 : 60[1a000] -> 63[8a000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 01 : 32[1a000] -> 35[8a000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r6i4n5:37341:37516 [2] NCCL INFO Channel 02 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 01 : 28[1a000] -> 31[8a000] via P2P/IPC +r7i2n6:895:990 [3] NCCL INFO Channel 01 : 11[8a000] -> 8[1a000] via P2P/IPC +r9i1n5:40816:40910 [3] NCCL INFO Channel 01 : 55[8a000] -> 52[1a000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [send] via NET/IB/2 +r7i3n0:38599:38693 [1] NCCL INFO Channel 01 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 01 : 24[1a000] -> 27[8a000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO 
Channel 01 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58028 [0] NCCL INFO Channel 01 : 44[1a000] -> 47[8a000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [receive] via NET/IB/2 +r9i1n4:24819:24914 [1] NCCL INFO Channel 01 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 01 : 39[8a000] -> 36[1a000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r8i0n3:57933:58029 [1] NCCL INFO Channel 01 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i4n5:46161:46256 [3] NCCL INFO Channel 01 : 23[8a000] -> 20[1a000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r7i1n3:4941:5040 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1c000] via P2P/IPC +r9i1n7:8763:8868 [3] NCCL INFO Channel 01 : 63[8a000] -> 60[1a000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 01 : 62[88000] -> 63[8a000] via P2P/IPC +r7i6n8:29153:29248 [1] NCCL INFO Channel 01 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i7n0:55674:55769 [1] NCCL INFO Channel 01 : 33[1c000] -> 32[1a000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO Channel 01 : 56[1a000] -> 59[8a000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 01 : 17[1c000] -> 16[1a000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 01 : 51[8a000] -> 48[1a000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 01 : 38[88000] -> 39[8a000] via P2P/IPC +r9i1n6:58313:58407 [1] NCCL INFO Channel 01 : 57[1c000] -> 56[1a000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 01 : 19[8a000] -> 16[1a000] via P2P/IPC +r7i4n5:46159:46255 [1] NCCL INFO Channel 01 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [send] via NET/IB/2 +r9i1n5:40814:40908 [1] NCCL INFO Channel 01 : 53[1c000] -> 52[1a000] via P2P/IPC +r8i0n3:57935:58030 [3] 
NCCL INFO Channel 01 : 47[8a000] -> 44[1a000] via P2P/IPC +r6i4n5:37342:37514 [3] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [send] via NET/IB/2 +r7i2n6:892:988 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1c000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 42[88000] -> 43[8a000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 01 : 40[1a000] -> 43[8a000] via P2P/IPC +r6i4n5:37339:37513 [0] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n0:55676:55771 [3] NCCL INFO Channel 01 : 35[8a000] -> 32[1a000] via P2P/IPC +r7i6n8:29155:29250 [3] NCCL INFO Channel 01 : 31[8a000] -> 28[1a000] via P2P/IPC +r7i5n3:79995:80090 [1] NCCL INFO Channel 01 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i7n2:57609:57704 [1] NCCL INFO Channel 01 : 41[1c000] -> 40[1a000] via P2P/IPC +r7i7n1:68405:68500 [1] NCCL INFO Channel 01 : 37[1c000] -> 36[1a000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [send] via NET/IB/2 +r6i4n5:37339:37513 [0] NCCL INFO Channel 02 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 01 : 59[8a000] -> 56[1a000] via P2P/IPC +r7i5n3:79997:80091 [3] NCCL INFO Channel 01 : 27[8a000] -> 24[1a000] via P2P/IPC +r9i1n4:24818:24916 [0] NCCL INFO Channel 01 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 01 : 20[1a000] -> 21[1c000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 02 : 6[88000] -> 7[8a000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 01 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [send] via NET/IB/2 +r9i1n5:40813:40909 [0] NCCL INFO Channel 01 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 02 : 5[1c000] -> 6[88000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 01 : 16[1a000] -> 17[1c000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO Channel 01 : 43[8a000] -> 40[1a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 02 
: 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 02 : 9[1c000] -> 10[88000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 01 : 32[1a000] -> 33[1c000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 01 : 44[1a000] -> 45[1c000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 01 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 01 : 36[1a000] -> 37[1c000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 02 : 14[88000] -> 15[8a000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO Channel 02 : 13[1c000] -> 14[88000] via P2P/IPC +r9i1n4:24819:24914 [1] NCCL INFO Channel 02 : 49[1c000] -> 50[88000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 02 : 6[88000] -> 5[1c000] via P2P/IPC +r9i1n7:8761:8869 [1] NCCL INFO Channel 02 : 61[1c000] -> 62[88000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 01 : 24[1a000] -> 25[1c000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO Channel 01 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i4n5:46159:46255 [1] NCCL INFO Channel 02 : 21[1c000] -> 22[88000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Channel 02 : 53[1c000] -> 54[88000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 02 : 17[1c000] -> 18[88000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO Channel 02 : 45[1c000] -> 46[88000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 01 : 40[1a000] -> 41[1c000] via P2P/IPC +r7i7n0:55674:55769 [1] NCCL INFO Channel 02 : 33[1c000] -> 34[88000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 02 : 14[88000] -> 13[1c000] via P2P/IPC +r7i6n8:29153:29248 [1] NCCL INFO Channel 02 : 29[1c000] -> 30[88000] via P2P/IPC +r7i7n1:68405:68500 [1] NCCL INFO Channel 02 : 37[1c000] -> 38[88000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [receive] via NET/IB/2 +r9i1n6:58313:58407 [1] NCCL INFO Channel 02 : 57[1c000] -> 58[88000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 50[88000] -> 51[8a000] via 
P2P/IPC +r7i5n3:79995:80090 [1] NCCL INFO Channel 02 : 25[1c000] -> 26[88000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [send] via NET/IB/2 +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [send] via NET/IB/2 +r7i1n3:4941:5040 [0] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 50[88000] -> 58[88000] [receive] via NET/IB/2 +r7i7n2:57609:57704 [1] NCCL INFO Channel 02 : 41[1c000] -> 42[88000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 58[88000] -> 59[8a000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO Channel 02 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r7i4n4:1768:1915 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [send] via NET/IB/2 +r7i2n6:895:990 [3] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 02 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r7i3n0:38601:38694 [3] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r6i4n5:37342:37514 [3] NCCL INFO Channel 02 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 02 : 8[1a000] -> 9[1c000] via P2P/IPC +r9i1n4:24818:24916 [0] NCCL INFO Channel 02 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r7i4n5:46161:46256 [3] NCCL INFO Channel 02 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [receive] via 
NET/IB/3 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 +r7i1n3:4942:5038 [1] NCCL INFO Channel 02 : 5[1c000] -> 4[1a000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n7:8763:8868 [3] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r9i1n5:40816:40910 [3] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 44[1a000] -> 45[1c000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 02 : 22[88000] -> 23[8a000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r7i6n8:29155:29250 [3] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 20[1a000] -> 21[1c000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO Channel 02 : 7[8a000] -> 6[88000] via P2P/IPC +r7i7n0:55676:55771 [3] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r6i4n5:37341:37516 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 02 : 16[1a000] -> 17[1c000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r7i3n0:38599:38693 [1] NCCL INFO Channel 02 : 13[1c000] -> 12[1a000] via P2P/IPC 
+r9i1n6:58312:58410 [0] NCCL INFO Channel 02 : 56[1a000] -> 57[1c000] via P2P/IPC +r6i4n5:37342:37514 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 36[1a000] -> 37[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 02 : 32[1a000] -> 33[1c000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 02 : 30[88000] -> 31[8a000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 02 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 02 : 10[88000] -> 11[8a000] via P2P/IPC +r7i5n3:79997:80091 [3] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [send] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 02 : 34[88000] -> 35[8a000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO Channel 02 : 15[8a000] -> 14[88000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 02 : 24[1a000] -> 25[1c000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r7i7n2:57611:57703 [3] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r7i4n5:46159:46255 [1] NCCL INFO Channel 02 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 03 : 6[88000] -> 5[1c000] via P2P/IPC +r7i7n2:57608:57705 [0] NCCL INFO Channel 02 : 40[1a000] -> 41[1c000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO Channel 03 : 7[8a000] -> 6[88000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 01 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r7i4n4:1768:1915 [2] NCCL INFO Channel 02 : 18[88000] -> 19[8a000] via P2P/IPC +r7i6n8:29155:29250 [3] NCCL INFO Channel 02 : 31[8a000] -> 30[88000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 02 : 26[88000] -> 
27[8a000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 02 : 22[88000] -> 21[1c000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 01 : 46[88000] -> 47[8a000] via P2P/IPC +r7i2n6:895:990 [3] NCCL INFO Channel 02 : 11[8a000] -> 10[88000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [send] via NET/IB/2 +r7i4n5:46161:46256 [3] NCCL INFO Channel 02 : 23[8a000] -> 22[88000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO Channel 02 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i7n0:55676:55771 [3] NCCL INFO Channel 02 : 35[8a000] -> 34[88000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 14[88000] -> 13[1c000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 +r7i7n0:55674:55769 [1] NCCL INFO Channel 02 : 33[1c000] -> 32[1a000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO Channel 03 : 15[8a000] -> 14[88000] via P2P/IPC +r7i6n8:29153:29248 [1] NCCL INFO Channel 02 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 02 : 30[88000] -> 29[1c000] via P2P/IPC +r7i2n6:894:989 [2] NCCL INFO Channel 02 : 10[88000] -> 9[1c000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 02 : 19[8a000] -> 18[88000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 02 : 34[88000] -> 33[1c000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 02 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [receive] via NET/IB/3 +r7i5n3:79997:80091 [3] NCCL INFO Channel 02 : 27[8a000] -> 26[88000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 02 : 38[88000] -> 39[8a000] via P2P/IPC +r7i5n3:79995:80090 [1] NCCL INFO Channel 02 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i4n5:46161:46256 [3] NCCL INFO Channel 03 : 23[8a000] -> 22[88000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 22[88000] -> 21[1c000] via P2P/IPC 
+r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 02 : 46[88000] -> 47[8a000] via P2P/IPC +r7i6n8:29155:29250 [3] NCCL INFO Channel 03 : 31[8a000] -> 30[88000] via P2P/IPC +r7i2n6:895:990 [3] NCCL INFO Channel 03 : 11[8a000] -> 10[88000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 01 : 50[88000] -> 58[88000] [send] via NET/IB/2 +r7i5n3:79996:80092 [2] NCCL INFO Channel 02 : 26[88000] -> 25[1c000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 02 : 18[88000] -> 17[1c000] via P2P/IPC +r6i4n5:37339:37513 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i7n0:55676:55771 [3] NCCL INFO Channel 03 : 35[8a000] -> 34[88000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 30[88000] -> 29[1c000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i7n0:55675:55770 [2] NCCL INFO Channel 03 : 34[88000] -> 33[1c000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [send] via NET/IB/2 +r7i2n6:894:989 [2] NCCL INFO Channel 03 : 10[88000] -> 9[1c000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 02 : 39[8a000] -> 38[88000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [receive] via NET/IB/3 +r8i0n3:57933:58029 [1] NCCL INFO Channel 02 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i7n1:68405:68500 [1] NCCL INFO Channel 02 : 37[1c000] -> 36[1a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [receive] via NET/IB/3 +r7i3n0:38599:38693 [1] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r7i5n3:79997:80091 [3] NCCL INFO 
Channel 03 : 27[8a000] -> 26[88000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 03 : 19[8a000] -> 18[88000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [receive] via NET/IB/2 +r9i1n6:58314:58408 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r7i7n1:68406:68501 [2] NCCL INFO Channel 02 : 38[88000] -> 37[1c000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 02 : 46[88000] -> 45[1c000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 03 : 26[88000] -> 25[1c000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 03 : 18[88000] -> 17[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57706 [2] NCCL INFO Channel 02 : 42[88000] -> 43[8a000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r7i7n2:57608:57705 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r8i0n3:57935:58030 [3] NCCL INFO Channel 02 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r7i4n5:46159:46255 [1] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r9i1n4:24820:24917 [2] NCCL INFO Channel 02 : 50[88000] -> 51[8a000] via P2P/IPC +r7i7n1:68407:68499 [3] NCCL INFO Channel 03 : 39[8a000] -> 38[88000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80089 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 38[88000] -> 37[1c000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40911 [2] NCCL 
INFO Channel 02 : 54[88000] -> 55[8a000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO Channel 02 : 43[8a000] -> 42[88000] via P2P/IPC +r7i7n2:57609:57704 [1] NCCL INFO Channel 02 : 41[1c000] -> 40[1a000] via P2P/IPC +r8i0n3:57935:58030 [3] NCCL INFO Channel 02 : 47[8a000] -> 46[88000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [receive] via NET/IB/2 +r7i7n0:55674:55769 [1] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r9i1n7:8762:8871 [2] NCCL INFO Channel 02 : 62[88000] -> 63[8a000] via P2P/IPC +r7i6n8:29153:29248 [1] NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r7i2n6:893:987 [1] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 02 : 58[88000] -> 59[8a000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 02 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [receive] via NET/IB/2 +r7i7n2:57610:57706 [2] NCCL INFO Channel 02 : 42[88000] -> 41[1c000] via P2P/IPC +r9i1n4:24819:24914 [1] NCCL INFO Channel 02 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n7:8763:8868 [3] NCCL INFO Channel 02 : 63[8a000] -> 62[88000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 46[88000] -> 45[1c000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [send] via NET/IB/2 +r7i5n3:79995:80090 [1] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n5:40816:40910 [3] NCCL INFO Channel 02 : 55[8a000] -> 54[88000] via P2P/IPC +r8i0n3:57935:58030 [3] NCCL INFO Channel 03 : 47[8a000] 
-> 46[88000] via P2P/IPC +r7i4n4:1767:1914 [1] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r9i1n7:8761:8869 [1] NCCL INFO Channel 02 : 61[1c000] -> 60[1a000] via P2P/IPC +r9i1n5:40814:40908 [1] NCCL INFO Channel 02 : 53[1c000] -> 52[1a000] via P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 02 : 50[88000] -> 49[1c000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [receive] via NET/IB/3 +r7i7n2:57611:57703 [3] NCCL INFO Channel 03 : 43[8a000] -> 42[88000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n6:58315:58409 [3] NCCL INFO Channel 02 : 59[8a000] -> 58[88000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] [receive] via NET/IB/2 +r7i4n4:1768:1915 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] [send] via NET/IB/2 +r9i1n7:8762:8871 [2] NCCL INFO Channel 02 : 62[88000] -> 61[1c000] via P2P/IPC +r9i1n6:58313:58407 [1] NCCL INFO Channel 02 : 57[1c000] -> 56[1a000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r9i1n5:40815:40911 [2] NCCL INFO Channel 02 : 54[88000] -> 53[1c000] via P2P/IPC +r7i7n2:57610:57706 [2] NCCL INFO Channel 03 : 42[88000] -> 41[1c000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [receive] via NET/IB/3 +r7i7n1:68405:68500 [1] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r9i1n4:24821:24915 [3] NCCL INFO Channel 03 : 51[8a000] -> 50[88000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 02 : 58[88000] -> 57[1c000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [send] via NET/IB/3 +r9i1n7:8763:8868 [3] NCCL INFO Channel 03 : 63[8a000] -> 62[88000] via 
P2P/IPC +r9i1n4:24820:24917 [2] NCCL INFO Channel 03 : 50[88000] -> 49[1c000] via P2P/IPC +r9i1n5:40816:40910 [3] NCCL INFO Channel 03 : 55[8a000] -> 54[88000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [receive] via NET/IB/2 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [receive] via NET/IB/2 +r7i5n3:79994:80089 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 62[88000] -> 61[1c000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 54[88000] -> 53[1c000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 03 : 59[8a000] -> 58[88000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [receive] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [receive] via NET/IB/2 +r7i7n2:57608:57705 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [send] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [send] via NET/IB/3 +r9i1n6:58314:58408 [2] NCCL INFO Channel 03 : 58[88000] -> 57[1c000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [send] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [receive] via NET/IB/2 +r7i2n6:894:989 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [send] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [send] via NET/IB/3 +r7i7n2:57609:57704 [1] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO Channel 03 : 10[88000] -> 11[8a000] via P2P/IPC +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [send] via NET/IB/2 +r7i4n4:1768:1915 [2] NCCL INFO Channel 03 : 14[88000] 
-> 18[88000] [receive] via NET/IB/2 +r7i5n3:79996:80092 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57706 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r9i1n4:24819:24914 [1] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [send] via NET/IB/3 +r9i1n6:58312:58410 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80092 [2] NCCL INFO Channel 03 : 26[88000] -> 27[8a000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO Channel 03 : 18[88000] -> 19[8a000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [receive] via NET/IB/2 +r9i1n7:8761:8869 [1] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [send] via NET/IB/3 +r9i1n5:40814:40908 [1] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [send] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] [send] via NET/IB/2 +r6i4n5:37339:37513 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n6:58313:58407 [1] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [send] via NET/IB/2 +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 46[88000] -> 54[88000] [receive] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [receive] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] 
[receive] via NET/IB/2 +r7i7n0:55675:55770 [2] NCCL INFO Channel 03 : 34[88000] -> 35[8a000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r9i1n4:24818:24916 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [send] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57706 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [receive] via NET/IB/3 +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [receive] via NET/IB/2 +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [send] via NET/IB/2 +r7i7n2:57610:57706 [2] NCCL INFO Channel 03 : 42[88000] -> 43[8a000] via P2P/IPC +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [receive] via NET/IB/3 +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 14[88000] -> 15[8a000] via P2P/IPC +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [receive] via NET/IB/2 +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [receive] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5039 [2] NCCL INFO Channel 03 : 6[88000] -> 7[8a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 46[88000] -> 54[88000] [send] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [receive] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [receive] via NET/IB/2 
+r9i1n6:58314:58408 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 22[88000] -> 23[8a000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [receive] via NET/IB/2 +r9i1n4:24820:24917 [2] NCCL INFO Channel 03 : 50[88000] -> 51[8a000] via P2P/IPC +r9i1n6:58314:58408 [2] NCCL INFO Channel 03 : 58[88000] -> 59[8a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 30[88000] -> 31[8a000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [send] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r7i1n3:4941:5040 [0] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [send] via NET/IB/2 +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [send] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 03 : 4[1a000] -> 7[8a000] via P2P/IPC +r6i4n5:37341:37516 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [receive] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [send] via NET/IB/3 +r7i1n3:4943:5039 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [send] via NET/IB/2 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [send] via NET/IB/3 +r6i4n5:37341:37516 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [send] via 
NET/IB/2 +r7i1n3:4944:5037 [3] NCCL INFO Channel 03 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] [receive] via NET/IB/2 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 46[88000] -> 47[8a000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [send] via NET/IB/3 +r9i1n5:40813:40909 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i3n0:38600:38696 [2] NCCL INFO Channel 03 : 14[88000] -> 18[88000] [send] via NET/IB/2 +r8i0n3:57932:58028 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [send] via NET/IB/3 +r9i1n7:8760:8870 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [receive] via NET/IB/2 +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 38[88000] -> 39[8a000] via P2P/IPC +r7i1n3:4943:5039 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i2n6:892:988 [0] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r7i4n5:46160:46254 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [send] via NET/IB/2 +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [receive] via NET/IB/2 +r7i4n4:1766:1916 [0] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 38[88000] -> 54[88000] [receive] via NET/IB/2 +r7i1n3:4943:5039 [2] NCCL INFO comm 0x153440006c80 rank 6 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:892:988 [0] NCCL INFO Channel 03 : 8[1a000] -> 11[8a000] via P2P/IPC +r7i6n8:29154:29247 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [send] via NET/IB/2 +r9i1n7:8762:8871 [2] NCCL INFO 
Channel 03 : 62[88000] -> 63[8a000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 03 : 16[1a000] -> 19[8a000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 54[88000] -> 55[8a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [send] via NET/IB/2 +r7i2n6:895:990 [3] NCCL INFO Channel 03 : 11[8a000] -> 8[1a000] via P2P/IPC +r7i1n3:4942:5038 [1] NCCL INFO Channel 03 : 5[1c000] -> 4[1a000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO Channel 03 : 19[8a000] -> 16[1a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO Channel 03 : 12[1a000] -> 15[8a000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO Channel 03 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r7i2n6:893:987 [1] NCCL INFO Channel 03 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46253 [0] NCCL INFO Channel 03 : 20[1a000] -> 23[8a000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO Channel 03 : 24[1a000] -> 27[8a000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5040 [0] NCCL INFO Channel 03 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO Channel 03 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57705 [0] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29249 [0] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r7i4n4:1767:1914 [1] NCCL INFO Channel 03 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i1n3:4944:5037 [3] NCCL INFO comm 0x1454e8006c80 rank 7 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i1n3:4942:5038 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 
p2p channels per peer +r9i1n5:40813:40909 [0] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55768 [0] NCCL INFO Channel 03 : 32[1a000] -> 35[8a000] via P2P/IPC +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [send] via NET/IB/2 +r7i1n3:4942:5038 [1] NCCL INFO comm 0x14e9b4006c80 rank 5 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57608:57705 [0] NCCL INFO Channel 03 : 40[1a000] -> 43[8a000] via P2P/IPC +r7i2n6:892:988 [0] NCCL INFO Channel 03 : 8[1a000] -> 9[1c000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24916 [0] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 +r7i2n6:895:990 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29249 [0] NCCL INFO Channel 03 : 28[1a000] -> 31[8a000] via P2P/IPC +r9i1n6:58312:58410 [0] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r7i2n6:894:989 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i2n6:895:990 [3] NCCL INFO comm 0x149e6c006c80 rank 11 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79997:80091 [3] NCCL INFO Channel 03 : 27[8a000] -> 24[1a000] via P2P/IPC +r9i1n5:40813:40909 [0] NCCL INFO Channel 03 : 52[1a000] -> 55[8a000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 03 : 36[1a000] -> 39[8a000] via P2P/IPC +r7i2n6:893:987 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i2n6:894:989 [2] NCCL INFO comm 0x152d0c006c80 rank 10 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:893:987 [1] NCCL INFO comm 0x153680006c80 rank 9 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24818:24916 [0] NCCL INFO Channel 03 : 48[1a000] -> 51[8a000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r7i2n6:892:988 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer 
+r9i1n6:58312:58410 [0] NCCL INFO Channel 03 : 56[1a000] -> 59[8a000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46161:46256 [3] NCCL INFO Channel 03 : 23[8a000] -> 20[1a000] via P2P/IPC +r7i4n4:1768:1915 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r7i2n6:892:988 [0] NCCL INFO comm 0x151ecc006c80 rank 8 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38598:38695 [0] NCCL INFO Channel 03 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i1n3:4941:5040 [0] NCCL INFO comm 0x151800006c80 rank 4 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37513 [0] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n5:46159:46255 [1] NCCL INFO Channel 03 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO Channel 03 : 16[1a000] -> 17[1c000] via P2P/IPC +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] [send] via NET/IB/2 +r7i6n8:29153:29248 [1] NCCL INFO Channel 03 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i5n3:79995:80090 [1] NCCL INFO Channel 03 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO Channel 03 : 43[8a000] -> 40[1a000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37513 [0] NCCL INFO Channel 03 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:8870 [0] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r7i7n0:55676:55771 [3] NCCL INFO Channel 03 : 35[8a000] -> 32[1a000] via P2P/IPC +r7i4n4:1769:1913 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68405:68500 [1] NCCL INFO Channel 03 : 37[1c000] -> 36[1a000] via P2P/IPC +r9i1n4:24819:24914 [1] NCCL INFO Channel 03 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i3n0:38601:38694 [3] NCCL INFO comm 0x1489a8006c80 rank 15 nranks 64 cudaDev 3 busId 
8a000 - Init COMPLETE +r7i4n4:1769:1913 [3] NCCL INFO comm 0x14a1a0006c80 rank 19 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1768:1915 [2] NCCL INFO comm 0x153bc8006c80 rank 18 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8760:8870 [0] NCCL INFO Channel 03 : 60[1a000] -> 63[8a000] via P2P/IPC +r7i3n0:38599:38693 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n4:1767:1914 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38599:38693 [1] NCCL INFO comm 0x14dda0006c80 rank 13 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1767:1914 [1] NCCL INFO comm 0x149c6c006c80 rank 17 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i3n0:38600:38696 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38696 [2] NCCL INFO comm 0x1455d0006c80 rank 14 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55674:55769 [1] NCCL INFO Channel 03 : 33[1c000] -> 32[1a000] via P2P/IPC +r8i0n3:57932:58028 [0] NCCL INFO Channel 03 : 44[1a000] -> 47[8a000] via P2P/IPC +r7i3n0:38598:38695 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29250 [3] NCCL INFO Channel 03 : 31[8a000] -> 28[1a000] via P2P/IPC +r7i7n2:57609:57704 [1] NCCL INFO Channel 03 : 41[1c000] -> 40[1a000] via P2P/IPC +r9i1n6:58315:58409 [3] NCCL INFO Channel 03 : 59[8a000] -> 56[1a000] via P2P/IPC +r8i0n3:57934:58031 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [send] via NET/IB/2 +r7i3n0:38598:38695 [0] NCCL INFO comm 0x14ee7c006c80 rank 12 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68407:68499 [3] NCCL INFO Channel 03 : 39[8a000] -> 36[1a000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO Channel 03 : 20[1a000] -> 21[1c000] via P2P/IPC +r9i1n5:40816:40910 [3] NCCL INFO Channel 03 : 55[8a000] -> 52[1a000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO Channel 03 : 45[1c000] -> 44[1a000] via P2P/IPC +r6i4n5:37342:37514 [3] NCCL INFO Channel 03 : 
3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24915 [3] NCCL INFO Channel 03 : 51[8a000] -> 48[1a000] via P2P/IPC +r7i4n5:46161:46256 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40814:40908 [1] NCCL INFO Channel 03 : 53[1c000] -> 52[1a000] via P2P/IPC +r7i4n4:1766:1916 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79994:80089 [0] NCCL INFO Channel 03 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i4n5:46161:46256 [3] NCCL INFO comm 0x1546a0006c80 rank 23 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79997:80091 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58407 [1] NCCL INFO Channel 03 : 57[1c000] -> 56[1a000] via P2P/IPC +r7i4n5:46159:46255 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80092 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n4:1766:1916 [0] NCCL INFO comm 0x14e71c006c80 rank 16 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46158:46253 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79997:80091 [3] NCCL INFO comm 0x151654006c80 rank 27 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79995:80090 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8761:8869 [1] NCCL INFO Channel 03 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i5n3:79996:80092 [2] NCCL INFO comm 0x14fa84006c80 rank 26 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55673:55768 [0] NCCL INFO Channel 03 : 32[1a000] -> 33[1c000] via P2P/IPC +r7i4n5:46158:46253 [0] NCCL INFO comm 0x14ee3c006c80 rank 20 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46159:46255 [1] NCCL INFO comm 0x14e138006c80 rank 21 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i5n3:79995:80090 [1] NCCL INFO comm 0x150924006c80 rank 25 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57935:58030 [3] NCCL INFO Channel 03 : 47[8a000] -> 44[1a000] via 
P2P/IPC +r7i7n0:55676:55771 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57705 [0] NCCL INFO Channel 03 : 40[1a000] -> 41[1c000] via P2P/IPC +r7i7n0:55675:55770 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n0:55676:55771 [3] NCCL INFO comm 0x151090006c80 rank 35 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29249 [0] NCCL INFO Channel 03 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i7n2:57611:57703 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n0:55674:55769 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79994:80089 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n0:55675:55770 [2] NCCL INFO comm 0x1490fc006c80 rank 34 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29155:29250 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8868 [3] NCCL INFO Channel 03 : 63[8a000] -> 60[1a000] via P2P/IPC +r7i7n0:55674:55769 [1] NCCL INFO comm 0x14deb0006c80 rank 33 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57609:57704 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40909 [0] NCCL INFO Channel 03 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i5n3:79994:80089 [0] NCCL INFO comm 0x14d248006c80 rank 24 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29155:29250 [3] NCCL INFO comm 0x14bfb4006c80 rank 31 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24818:24916 [0] NCCL INFO Channel 03 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i7n1:68404:68502 [0] NCCL INFO Channel 03 : 36[1a000] -> 37[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57611:57703 [3] NCCL INFO comm 0x150660006c80 rank 43 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40816:40910 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57609:57704 
[1] NCCL INFO comm 0x14cd70006c80 rank 41 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29247 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58312:58410 [0] NCCL INFO Channel 03 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i6n8:29153:29248 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24821:24915 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29247 [2] NCCL INFO comm 0x148034006c80 rank 30 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68407:68499 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40816:40910 [3] NCCL INFO comm 0x152d8c006c80 rank 55 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40814:40908 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57705 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57705 [0] NCCL INFO comm 0x14fec4006c80 rank 40 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37513 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55768 [0] NCCL INFO comm 0x149258006c80 rank 32 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57610:57706 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40814:40908 [1] NCCL INFO comm 0x145f84006c80 rank 53 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58315:58409 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24819:24914 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24821:24915 [3] NCCL INFO comm 0x151164006c80 rank 51 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29153:29248 [1] NCCL INFO comm 0x15448c006c80 rank 29 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68407:68499 [3] NCCL INFO comm 0x14d87c006c80 rank 39 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68406:68501 [2] NCCL INFO Channel 03 : 
38[88000] -> 54[88000] [send] via NET/IB/2 +r7i7n1:68405:68500 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57610:57706 [2] NCCL INFO comm 0x14f55c006c80 rank 42 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37342:37514 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40909 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58315:58409 [3] NCCL INFO comm 0x14554c006c80 rank 59 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58313:58407 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24819:24914 [1] NCCL INFO comm 0x14f4cc006c80 rank 49 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24818:24916 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29249 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68405:68500 [1] NCCL INFO comm 0x14adb0006c80 rank 37 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37342:37514 [3] NCCL INFO comm 0x152c94005e60 rank 3 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37340:37515 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40909 [0] NCCL INFO comm 0x1503a0006c80 rank 52 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58313:58407 [1] NCCL INFO comm 0x150c30006c80 rank 57 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58312:58410 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24916 [0] NCCL INFO comm 0x151268006c80 rank 48 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24820:24917 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58028 [0] NCCL INFO Channel 03 : 44[1a000] -> 45[1c000] via P2P/IPC +r6i4n5:37340:37515 [1] NCCL INFO comm 0x154408005e60 rank 1 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58312:58410 [0] NCCL INFO comm 0x14aa2c006c80 rank 56 
nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24820:24917 [2] NCCL INFO comm 0x152110006c80 rank 50 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29152:29249 [0] NCCL INFO comm 0x150478006c80 rank 28 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57935:58030 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37513 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37513 [0] NCCL INFO comm 0x147eb8006e20 rank 0 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +r8i0n3:57935:58030 [3] NCCL INFO comm 0x15212c006c80 rank 47 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8760:8870 [0] NCCL INFO Channel 03 : 60[1a000] -> 61[1c000] via P2P/IPC +r8i0n3:57933:58029 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8762:8871 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [send] via NET/IB/2 +r9i1n7:8763:8868 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57933:58029 [1] NCCL INFO comm 0x150af4006c80 rank 45 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57932:58028 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8868 [3] NCCL INFO comm 0x155074005e60 rank 63 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8761:8869 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68404:68502 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58028 [0] NCCL INFO comm 0x152a60006c80 rank 44 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8761:8869 [1] NCCL INFO comm 0x154a5c005e60 rank 61 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68404:68502 [0] NCCL INFO comm 0x1455a4006c80 rank 36 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8760:8870 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46160:46254 
[2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8870 [0] NCCL INFO comm 0x14df18005e60 rank 60 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46160:46254 [2] NCCL INFO comm 0x14aa04006c80 rank 22 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40815:40911 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [send] via NET/IB/2 +r9i1n6:58314:58408 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68406:68501 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58314:58408 [2] NCCL INFO comm 0x14cbd0006c80 rank 58 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68406:68501 [2] NCCL INFO comm 0x15406c006c80 rank 38 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37341:37516 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37341:37516 [2] NCCL INFO comm 0x14a568005e60 rank 2 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57934:58031 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57934:58031 [2] NCCL INFO comm 0x154a4c006c80 rank 46 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8762:8871 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8762:8871 [2] NCCL INFO comm 0x145fc0005e60 rank 62 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40815:40911 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40815:40911 [2] NCCL INFO comm 0x15114c006c80 rank 54 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=0 STAGE=0 LAYERS=6 [0, 6) STAGE_PARAMS=917757952 (917.758M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=3 STAGE=0 LAYERS=6 [0, 6) STAGE_PARAMS=917757952 (917.758M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 
19:49:23,327] [INFO] [engine.py:133:__init__] RANK=1 STAGE=0 LAYERS=6 [0, 6) STAGE_PARAMS=917757952 (917.758M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=2 STAGE=0 LAYERS=6 [0, 6) STAGE_PARAMS=917757952 (917.758M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=51 STAGE=12 LAYERS=4 [50, 54) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=34 STAGE=8 LAYERS=4 [34, 38) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=35 STAGE=8 LAYERS=4 [34, 38) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=32 STAGE=8 LAYERS=4 [34, 38) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=33 STAGE=8 LAYERS=4 [34, 38) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=16 STAGE=4 LAYERS=4 [18, 22) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=19 STAGE=4 LAYERS=4 [18, 22) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=17 STAGE=4 LAYERS=4 [18, 22) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) 
UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=18 STAGE=4 LAYERS=4 [18, 22) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=49 STAGE=12 LAYERS=4 [50, 54) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=50 STAGE=12 LAYERS=4 [50, 54) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=48 STAGE=12 LAYERS=4 [50, 54) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=56 STAGE=14 LAYERS=4 [58, 62) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=27 STAGE=6 LAYERS=4 [26, 30) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=10 STAGE=2 LAYERS=4 [10, 14) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=13 STAGE=3 LAYERS=4 [14, 18) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=43 STAGE=10 LAYERS=4 [42, 46) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=41 STAGE=10 LAYERS=4 [42, 46) STAGE_PARAMS=805560320 
(805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=42 STAGE=10 LAYERS=4 [42, 46) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=57 STAGE=14 LAYERS=4 [58, 62) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=59 STAGE=14 LAYERS=4 [58, 62) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=58 STAGE=14 LAYERS=4 [58, 62) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=39 STAGE=9 LAYERS=4 [38, 42) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=61 STAGE=15 LAYERS=8 [62, 70) STAGE_PARAMS=917774336 (917.774M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=60 STAGE=15 LAYERS=8 [62, 70) STAGE_PARAMS=917774336 (917.774M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=63 STAGE=15 LAYERS=8 [62, 70) STAGE_PARAMS=917774336 (917.774M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=62 STAGE=15 LAYERS=8 [62, 70) STAGE_PARAMS=917774336 (917.774M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=26 
STAGE=6 LAYERS=4 [26, 30) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=25 STAGE=6 LAYERS=4 [26, 30) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=24 STAGE=6 LAYERS=4 [26, 30) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=30 STAGE=7 LAYERS=4 [30, 34) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=31 STAGE=7 LAYERS=4 [30, 34) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=28 STAGE=7 LAYERS=4 [30, 34) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=29 STAGE=7 LAYERS=4 [30, 34) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=5 STAGE=1 LAYERS=4 [6, 10) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=4 STAGE=1 LAYERS=4 [6, 10) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=7 STAGE=1 LAYERS=4 [6, 10) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] 
[INFO] [engine.py:133:__init__] RANK=6 STAGE=1 LAYERS=4 [6, 10) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=53 STAGE=13 LAYERS=4 [54, 58) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=52 STAGE=13 LAYERS=4 [54, 58) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=54 STAGE=13 LAYERS=4 [54, 58) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=11 STAGE=2 LAYERS=4 [10, 14) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=8 STAGE=2 LAYERS=4 [10, 14) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=9 STAGE=2 LAYERS=4 [10, 14) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=21 STAGE=5 LAYERS=4 [22, 26) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=20 STAGE=5 LAYERS=4 [22, 26) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=22 STAGE=5 LAYERS=4 [22, 26) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 
(52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=23 STAGE=5 LAYERS=4 [22, 26) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=14 STAGE=3 LAYERS=4 [14, 18) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=15 STAGE=3 LAYERS=4 [14, 18) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=45 STAGE=11 LAYERS=4 [46, 50) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=44 STAGE=11 LAYERS=4 [46, 50) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=47 STAGE=11 LAYERS=4 [46, 50) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=46 STAGE=11 LAYERS=4 [46, 50) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=40 STAGE=10 LAYERS=4 [42, 46) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=12 STAGE=3 LAYERS=4 [14, 18) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=37 STAGE=9 LAYERS=4 [38, 42) STAGE_PARAMS=805560320 (805.560M) 
TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=38 STAGE=9 LAYERS=4 [38, 42) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=36 STAGE=9 LAYERS=4 [38, 42) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +[2021-05-27 19:49:23,327] [INFO] [engine.py:133:__init__] RANK=55 STAGE=13 LAYERS=4 [54, 58) STAGE_PARAMS=805560320 (805.560M) TOTAL_PARAMS=52453507072 (52453.507M) UNIQUE_PARAMS=52004716544 (52004.717M) +r9i1n4:24820:24931 [2] NCCL INFO Channel 00/02 : 0 1 +r9i1n4:24820:24931 [2] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40815:40925 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40815:40925 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r6i4n5:37341:37534 [2] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37341:37534 [2] NCCL INFO Channel 01/02 : 0 1 +r7i1n3:4943:5053 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4943:5053 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24820:24931 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24820:24931 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n4:24820:24931 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40815:40925 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37341:37534 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37341:37534 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37341:37534 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4943:5053 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55674:55785 [1] NCCL INFO Channel 00/02 : 0 1 
+r7i7n0:55674:55785 [1] NCCL INFO Channel 01/02 : 0 1 +r7i7n1:68405:68515 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68405:68515 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55674:55785 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68405:68515 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55674:55785 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n0:55674:55785 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68406:68516 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55675:55789 [2] NCCL INFO Channel 00/02 : 0 1 +r7i7n1:68406:68516 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n1:68406:68516 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55675:55789 [2] NCCL INFO Channel 01/02 : 0 1 +r7i7n0:55675:55789 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55675:55789 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n0:55675:55789 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40815:40925 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n4:24820:24931 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i1n3:4943:5053 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r6i4n5:37339:37538 [0] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37339:37538 [0] NCCL INFO Channel 01/02 : 0 1 +r7i1n3:4941:5054 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4941:5054 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r6i4n5:37339:37538 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37339:37538 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 
[1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37339:37538 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4941:5054 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55674:55785 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i7n1:68405:68515 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n0:55675:55789 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n1:68406:68516 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i1n3:4943:5053 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n5:40815:40925 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n4:24820:24931 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i4n4:1766:1930 [0] NCCL INFO Channel 00/02 : 0 1 +r7i4n4:1766:1930 [0] NCCL INFO Channel 01/02 : 0 1 +r7i4n5:46158:46269 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46158:46269 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n4:1766:1930 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1766:1930 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n4:1766:1930 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46158:46269 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5055 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4942:5055 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i1n3:4941:5054 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r6i4n5:37340:37540 [1] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37340:37540 [1] NCCL INFO Channel 01/02 : 0 1 +r7i1n3:4942:5055 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37340:37540 [1] NCCL INFO threadThresholds 
8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37340:37540 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37340:37540 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37339:37538 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i1n3:4944:5056 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55674:55785 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i7n1:68405:68515 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r6i4n5:37342:37536 [3] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37342:37536 [3] NCCL INFO Channel 01/02 : 0 1 +r6i4n5:37342:37536 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37342:37536 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37342:37536 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4944:5056 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i1n3:4944:5056 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79994:80106 [0] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79994:80106 [0] NCCL INFO Channel 01/02 : 0 1 +r7i6n8:29152:29263 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79994:80106 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79994:80106 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i6n8:29152:29263 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i6n8:29152:29263 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79994:80106 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55675:55789 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68406:68516 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n0:55673:55791 [0] NCCL INFO Channel 00/02 : 0 1 +r7i7n0:55673:55791 [0] NCCL INFO Channel 01/02 : 0 1 
+r7i7n1:68404:68517 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68404:68517 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55673:55791 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55673:55791 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n0:55673:55791 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68404:68517 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55676:55788 [3] NCCL INFO Channel 00/02 : 0 1 +r7i7n1:68407:68518 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68407:68518 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55676:55788 [3] NCCL INFO Channel 01/02 : 0 1 +r7i7n1:68407:68518 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55676:55788 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55676:55788 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n0:55676:55788 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57610:57721 [2] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57610:57721 [2] NCCL INFO Channel 01/02 : 0 1 +r8i0n3:57934:58045 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57934:58045 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n2:57610:57721 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57610:57721 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n2:57610:57721 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57934:58045 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24819:24933 [1] NCCL INFO Channel 00/02 : 0 1 +r9i1n4:24819:24933 [1] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40814:40926 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40814:40926 [1] NCCL INFO Trees 
[0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24819:24933 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24819:24933 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n4:24819:24933 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n5:40814:40926 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1769:1932 [3] NCCL INFO Channel 00/02 : 0 1 +r7i4n5:46161:46270 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1769:1932 [3] NCCL INFO Channel 01/02 : 0 1 +r7i4n4:1769:1932 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1769:1932 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n4:1769:1932 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46161:46270 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n5:46161:46270 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n4:24818:24937 [0] NCCL INFO Channel 00/02 : 0 1 +r9i1n5:40813:40927 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24818:24937 [0] NCCL INFO Channel 01/02 : 0 1 +r9i1n4:24818:24937 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24821:24936 [3] NCCL INFO Channel 00/02 : 0 1 +r9i1n5:40813:40927 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24821:24936 [3] NCCL INFO Channel 01/02 : 0 1 +r9i1n4:24818:24937 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n4:24818:24937 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40813:40927 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24821:24936 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57611:57726 [3] NCCL INFO Channel 00/02 : 0 1 +r9i1n5:40816:40928 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24821:24936 [3] NCCL INFO Trees [0] 
1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i3n0:38598:38709 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40816:40928 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24821:24936 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40816:40928 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:892:1006 [0] NCCL INFO Channel 00/02 : 0 1 +r7i2n6:892:1006 [0] NCCL INFO Channel 01/02 : 0 1 +r8i0n3:57935:58044 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79995:80108 [1] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29153:29264 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57611:57726 [3] NCCL INFO Channel 01/02 : 0 1 +r7i7n2:57611:57726 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38598:38709 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n2:57611:57726 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i3n0:38598:38709 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57611:57726 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:892:1006 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:892:1006 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i2n6:892:1006 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57935:58044 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r8i0n3:57935:58044 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79995:80108 [1] NCCL INFO Channel 01/02 : 0 1 +r7i5n3:79995:80108 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79995:80108 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i5n3:79995:80108 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29153:29264 [1] NCCL INFO Trees [0] 
-1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i6n8:29153:29264 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58315:58425 [3] NCCL INFO Channel 00/02 : 0 1 +r9i1n6:58315:58425 [3] NCCL INFO Channel 01/02 : 0 1 +r7i4n5:46160:46272 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1768:1935 [2] NCCL INFO Channel 00/02 : 0 1 +r9i1n7:8763:8884 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n7:8763:8884 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8763:8884 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38600:38710 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57608:57725 [0] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57608:57725 [0] NCCL INFO Channel 01/02 : 0 1 +r8i0n3:57932:58046 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:894:1008 [2] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79996:80112 [2] NCCL INFO Channel 00/02 : 0 1 +r9i1n6:58315:58425 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58314:58426 [2] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29154:29266 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58315:58425 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n6:58314:58426 [2] NCCL INFO Channel 01/02 : 0 1 +r9i1n6:58315:58425 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58314:58426 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46160:46272 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n6:58314:58426 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46160:46272 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58314:58426 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46159:46271 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46159:46271 [1] NCCL INFO 
Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8762:8885 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46159:46271 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1768:1935 [2] NCCL INFO Channel 01/02 : 0 1 +r9i1n7:8762:8885 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8762:8885 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1768:1935 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1768:1935 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n4:1768:1935 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1767:1936 [1] NCCL INFO Channel 00/02 : 0 1 +r7i4n4:1767:1936 [1] NCCL INFO Channel 01/02 : 0 1 +r7i4n4:1767:1936 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1767:1936 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n4:1767:1936 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38600:38710 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i3n0:38600:38710 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57608:57725 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57932:58046 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n2:57608:57725 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n2:57609:57723 [1] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57609:57723 [1] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:894:1008 [2] NCCL INFO Channel 01/02 : 0 1 +r8i0n3:57932:58046 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79996:80112 [2] NCCL INFO Channel 01/02 : 0 1 +r7i7n2:57608:57725 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:894:1008 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57933:58047 [1] NCCL 
INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79997:80111 [3] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79996:80112 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57609:57723 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:894:1008 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i6n8:29154:29266 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r8i0n3:57933:58047 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i5n3:79997:80111 [3] NCCL INFO Channel 01/02 : 0 1 +r7i5n3:79996:80112 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n6:58312:58428 [0] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57609:57723 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i2n6:894:1008 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29154:29266 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57933:58047 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79996:80112 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57609:57723 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:895:1010 [3] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29155:29265 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79997:80111 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:895:1010 [3] NCCL INFO Channel 01/02 : 0 1 +r7i6n8:29155:29265 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i5n3:79997:80111 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i2n6:895:1010 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i6n8:29155:29265 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79997:80111 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:895:1010 [3] NCCL INFO 
Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n7:8760:8887 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:895:1010 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8760:8887 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8760:8887 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38601:38711 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38601:38711 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i3n0:38601:38711 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38599:38712 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38599:38712 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i3n0:38599:38712 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:893:1004 [1] NCCL INFO Channel 00/02 : 0 1 +r7i2n6:893:1004 [1] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:893:1004 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:893:1004 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i2n6:893:1004 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58312:58428 [0] NCCL INFO Channel 01/02 : 0 1 +r9i1n6:58312:58428 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58312:58428 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n6:58312:58428 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58313:58430 [1] NCCL INFO Channel 00/02 : 0 1 +r9i1n6:58313:58430 [1] NCCL INFO Channel 01/02 : 0 1 +r9i1n6:58313:58430 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58313:58430 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n6:58313:58430 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8761:8886 [1] NCCL INFO threadThresholds 8/8/64 | 
16/8/64 | 8/8/64 +r9i1n7:8761:8886 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n7:8761:8886 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46158:46269 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1930 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5054 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r6i4n5:37340:37540 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i1n3:4942:5055 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r6i4n5:37339:37538 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i6n8:29152:29263 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n4:24820:24931 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i5n3:79994:80106 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5053 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37342:37536 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n5:40815:40925 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55673:55791 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68517 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n0:55674:55785 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i7n1:68405:68515 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r9i1n5:40814:40926 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i4n5:46161:46270 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n4:24819:24933 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 
+r7i7n0:55675:55789 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n1:68406:68516 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n4:24818:24937 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38709 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n5:40816:40928 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i5n3:79995:80108 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29264 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n2:57610:57721 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n7:8763:8884 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n5:40813:40927 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i3n0:38600:38710 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n6:58315:58425 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i6n8:29154:29266 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1767:1936 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i2n6:892:1006 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n5:46159:46271 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i5n3:79996:80112 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n2:57608:57725 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i2n6:894:1008 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r8i0n3:57932:58046 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57723 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i3n0:38599:38712 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 
+r7i7n1:68407:68518 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i2n6:893:1004 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r8i0n3:57933:58047 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r9i1n7:8760:8887 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n0:55676:55788 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r8i0n3:57935:58044 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n6:58312:58428 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n7:8761:8886 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r8i0n3:57934:58045 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n6:58313:58430 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i4n4:1766:1930 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n5:46158:46269 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i1n3:4944:5056 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i5n3:79997:80111 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i7n2:57611:57726 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i1n3:4942:5055 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r6i4n5:37340:37540 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i5n3:79994:80106 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i6n8:29152:29263 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i1n3:4943:5053 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n5:40815:40925 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n4:24820:24931 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n4:24821:24936 [3] NCCL INFO 
Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n7:8762:8885 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i3n0:38601:38711 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n6:58314:58426 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r6i4n5:37342:37536 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n0:55673:55791 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i2n6:895:1010 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i6n8:29155:29265 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i4n5:46160:46272 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68404:68517 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n4:1768:1935 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55674:55785 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i7n1:68405:68515 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i1n3:4941:5054 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n5:46161:46270 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n1:68406:68516 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n0:55675:55789 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n5:40814:40926 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r6i4n5:37339:37538 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n4:1769:1932 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i1n3:4943:5053 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24819:24933 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] 
via NET/IB/3 +r7i3n0:38598:38709 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i1n3:4943:5053 [2] NCCL INFO comm 0x1530b4001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24820:24931 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:24931 [2] NCCL INFO comm 0x151d78001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i5n3:79995:80108 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n5:40815:40925 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:24820 [2] NCCL INFO Launch mode Parallel +r9i1n4:24818:24937 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n2:57610:57721 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n7:8763:8884 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i6n8:29153:29264 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i3n0:38600:38710 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i4n5:46159:46271 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i4n4:1767:1936 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n5:40815:40925 [2] NCCL INFO comm 0x150dbc001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58315:58425 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40816:40928 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r6i4n5:37341:37534 [2] NCCL INFO comm 0x14a168001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40813:40927 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i6n8:29154:29266 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i5n3:79996:80112 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 
+r7i7n0:55674:55785 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37341:37341 [2] NCCL INFO Launch mode Parallel +r7i2n6:892:1006 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n1:68405:68515 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55674:55785 [1] NCCL INFO comm 0x14db28001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n0:55674:55674 [1] NCCL INFO Launch mode Parallel +r7i7n2:57608:57725 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r8i0n3:57932:58046 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i2n6:894:1008 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68407:68518 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n1:68405:68515 [1] NCCL INFO comm 0x14aa24001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68406:68516 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55675:55789 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57609:57723 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i2n6:893:1004 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i3n0:38599:38712 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i7n1:68406:68516 [2] NCCL INFO comm 0x153cdc001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55676:55788 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n7:8760:8887 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r8i0n3:57933:58047 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i7n0:55675:55789 [2] NCCL INFO comm 0x148d64001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8761:8886 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i5n3:79997:80111 [3] NCCL INFO Channel 00 : 
0[8a000] -> 1[8a000] [send] via NET/IB/2 +r8i0n3:57934:58045 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n0:55675:55675 [2] NCCL INFO Launch mode Parallel +r9i1n6:58313:58430 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i1n3:4944:5056 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r8i0n3:57935:58044 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r9i1n6:58312:58428 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n2:57611:57726 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n4:24821:24936 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i3n0:38601:38711 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r9i1n7:8762:8885 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i2n6:895:1010 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n6:58314:58426 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i4n5:46160:46272 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i6n8:29155:29265 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i4n5:46158:46269 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1930 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n4:1768:1935 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i1n3:4942:5055 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i1n3:4941:5054 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79994:80106 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29263 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n4:1769:1932 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r6i4n5:37340:37540 [1] NCCL INFO Channel 01 : 1[1c000] 
-> 0[1c000] [receive] via NET/IB/3 +r7i7n0:55673:55791 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r6i4n5:37339:37538 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n1:68404:68517 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5054 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4941:5054 [0] NCCL INFO comm 0x151470001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40814:40926 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r6i4n5:37339:37538 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37538 [0] NCCL INFO comm 0x147ac0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24818:24937 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i5n3:79995:80108 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +r7i4n4:1767:1936 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i4n5:46159:46271 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n1:68407:68518 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n4:24819:24933 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29264 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r9i1n6:58315:58425 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +WARNING: could not find the metadata file /gpfsscratch/rech/eha/commun/checkpoints/gpt2-meg-ds/latest_checkpointed_iteration.txt +r9i1n5:40813:40927 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n7:8763:8884 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i6n8:29154:29266 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 + will not load any 
checkpoints and will start from random +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 1024000 + validation: 112640 + test: 10240 +> building train, validation, and test datasets for GPT2 ... + > building dataset index ... +r7i3n0:38598:38709 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n0:55676:55788 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i3n0:38600:38710 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i5n3:79996:80112 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r6i4n5:37342:37536 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 + reading sizes... + reading pointers... + reading document index... + creating numpy buffer of mmap... +r7i3n0:38599:38712 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r8i0n3:57932:58046 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 + creating memory view of numpy buffer... 
+r7i2n6:894:1008 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 + > finished creating indexed dataset in 0.001094 seconds + number of documents: 10000 + > dataset split: + train: +r7i4n4:1766:1930 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 + document indices in [0, 9490) total of 9490 documents + validation: +r7i4n5:46158:46269 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r8i0n3:57933:58047 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 + document indices in [9490, 9990) total of 500 documents + test: + document indices in [9990, 10000) total of 10 documents +r7i1n3:4944:5056 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n2:57608:57725 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i2n6:892:1006 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n5:40816:40928 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i2n6:893:1004 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n7:8760:8887 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57723 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n4:24821:24936 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i1n3:4942:5055 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r9i1n7:8761:8886 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n2:57610:57721 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i6n8:29152:29263 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n2:57611:57726 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r8i0n3:57934:58045 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37340:37540 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via 
NET/IB/3 +r9i1n6:58313:58430 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i7n0:55673:55791 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r8i0n3:57935:58044 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i5n3:79997:80111 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i5n3:79994:80106 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i3n0:38601:38711 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i2n6:895:1010 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n7:8762:8885 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n6:58312:58428 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68517 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i6n8:29155:29265 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i4n5:46160:46272 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n6:58314:58426 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i4n4:1766:1930 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46158:46269 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1766:1930 [0] NCCL INFO comm 0x14e388001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46158:46269 [0] NCCL INFO comm 0x14eaac001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1766:1766 [0] NCCL INFO Launch mode Parallel +r7i1n3:4942:5055 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1768:1935 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i1n3:4942:5055 [1] NCCL INFO comm 0x14e628001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29152:29263 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer 
+r7i4n5:46161:46270 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i6n8:29152:29263 [0] NCCL INFO comm 0x1500e8001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40814:40926 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i7n0:55673:55791 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79994:80106 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37340:37540 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55673:55791 [0] NCCL INFO comm 0x148ec0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55673:55673 [0] NCCL INFO Launch mode Parallel +r7i5n3:79994:80106 [0] NCCL INFO comm 0x14ceb0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37340:37540 [1] NCCL INFO comm 0x154010001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37340:37340 [1] NCCL INFO Launch mode Parallel +r7i7n1:68407:68518 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n1:68404:68517 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79994:79994 [0] NCCL INFO Launch mode Parallel +r7i7n1:68404:68517 [0] NCCL INFO comm 0x145214001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24818:24937 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i5n3:79995:80108 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24819:24933 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n6:58315:58425 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n5:40813:40927 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n4:1769:1932 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n7:8763:8884 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i3n0:38598:38709 [0] NCCL INFO Channel 01 : 
1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n5:46159:46271 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r6i4n5:37342:37536 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i4n4:1767:1936 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i7n0:55676:55788 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i3n0:38600:38710 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i6n8:29153:29264 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i3n0:38599:38712 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i1n3:4944:5056 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r9i1n5:40816:40928 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i2n6:894:1008 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n5:40814:40926 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58046 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n4:24818:24937 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57725 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r8i0n3:57933:58047 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r9i1n5:40814:40926 [1] NCCL INFO comm 0x145bf8001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29266 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n4:24821:24936 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n6:58315:58425 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40927 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79995:80108 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24819:24933 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels 
per peer +r9i1n4:24818:24937 [0] NCCL INFO comm 0x150ed0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24818:24818 [0] NCCL INFO Launch mode Parallel +r9i1n4:24819:24933 [1] NCCL INFO comm 0x14f144001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:892:1006 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n1:68407:68518 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40927 [0] NCCL INFO comm 0x150010001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58315:58425 [3] NCCL INFO comm 0x1451c4001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8763:8884 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79995:80108 [1] NCCL INFO comm 0x15059c001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57609:57723 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24819:24819 [1] NCCL INFO Launch mode Parallel +r7i7n1:68407:68518 [3] NCCL INFO comm 0x14d4f0001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:893:1004 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n7:8760:8887 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n6:58315:58315 [3] NCCL INFO Launch mode Parallel +r7i5n3:79995:79995 [1] NCCL INFO Launch mode Parallel +r7i4n5:46159:46271 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1767:1936 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37342:37536 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8884 [3] NCCL INFO comm 0x154ca0001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8761:8886 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i6n8:29153:29264 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80112 [2] NCCL INFO Channel 01 
: 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i4n5:46159:46271 [1] NCCL INFO comm 0x14ddac001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1767:1936 [1] NCCL INFO comm 0x1498e4001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i3n0:38601:38711 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r6i4n5:37342:37536 [3] NCCL INFO comm 0x15289c001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57610:57721 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n0:55676:55788 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29153:29264 [1] NCCL INFO comm 0x154100001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i5n3:79997:80111 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n7:8762:8885 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i4n4:1767:1767 [1] NCCL INFO Launch mode Parallel +r7i1n3:4944:5056 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38710 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:895:1010 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r6i4n5:37342:37342 [3] NCCL INFO Launch mode Parallel +r7i7n2:57611:57726 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n0:55676:55788 [3] NCCL INFO comm 0x150d08001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40816:40928 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4944:5056 [3] NCCL INFO comm 0x14515c001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38600:38710 [2] NCCL INFO comm 0x145240001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57934:58045 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n6:58313:58430 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i7n0:55676:55676 [3] NCCL INFO 
Launch mode Parallel +r7i4n5:46160:46272 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i6n8:29155:29265 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r9i1n5:40816:40928 [3] NCCL INFO comm 0x152a00001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24821:24936 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57935:58044 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i2n6:894:1008 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58314:58426 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n4:24821:24936 [3] NCCL INFO comm 0x150ddc001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:894:1008 [2] NCCL INFO comm 0x152974001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24821:24821 [3] NCCL INFO Launch mode Parallel +r7i6n8:29154:29266 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:894:894 [2] NCCL INFO Launch mode Parallel +r7i3n0:38598:38709 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38599:38712 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57933:58047 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58046 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57725 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29266 [2] NCCL INFO comm 0x147ca4001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38599:38712 [1] NCCL INFO comm 0x14da14001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i3n0:38598:38709 [0] NCCL INFO comm 0x14eaec001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57933:58047 [1] NCCL INFO comm 0x150768001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57932:58046 [0] NCCL INFO comm 
0x1526d0001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57609:57723 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57725 [0] NCCL INFO comm 0x14fb2c001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8761:8886 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:1006 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58312:58428 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n4:1768:1935 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i5n3:79997:80111 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38711 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57608 [0] NCCL INFO Launch mode Parallel +r7i7n2:57609:57723 [1] NCCL INFO comm 0x14c9e8001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57609:57609 [1] NCCL INFO Launch mode Parallel +r9i1n7:8762:8885 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:893:1004 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:1006 [0] NCCL INFO comm 0x151b34001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i2n6:893:1004 [1] NCCL INFO comm 0x1532f8001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:895:1010 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:892 [0] NCCL INFO Launch mode Parallel +r7i2n6:893:893 [1] NCCL INFO Launch mode Parallel +r7i6n8:29155:29265 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58430 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80112 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79997:80111 [3] NCCL INFO comm 0x1512cc001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79997:79997 
[3] NCCL INFO Launch mode Parallel +r7i3n0:38601:38711 [3] NCCL INFO comm 0x14861c001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:895:1010 [3] NCCL INFO comm 0x149ae4001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:895:895 [3] NCCL INFO Launch mode Parallel +r7i7n2:57610:57721 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57611:57726 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8762:8885 [2] NCCL INFO comm 0x145be8001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8761:8886 [1] NCCL INFO comm 0x154668001060 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57935:58044 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57934:58045 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29265 [3] NCCL INFO comm 0x14bc28001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79996:80112 [2] NCCL INFO comm 0x14f6ec001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58313:58430 [1] NCCL INFO comm 0x1508a8001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58314:58426 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58313 [1] NCCL INFO Launch mode Parallel +r7i4n4:1769:1932 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n2:57611:57726 [3] NCCL INFO comm 0x1502d8001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57610:57721 [2] NCCL INFO comm 0x14f1c4001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46161:46270 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r8i0n3:57934:58045 [2] NCCL INFO comm 0x1546bc001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57935:58044 [3] NCCL INFO comm 0x151da0001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58314:58426 [2] NCCL INFO comm 
0x14c838001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58314:58314 [2] NCCL INFO Launch mode Parallel +r7i5n3:79996:79996 [2] NCCL INFO Launch mode Parallel +r9i1n7:8760:8887 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57610:57610 [2] NCCL INFO Launch mode Parallel +r7i7n2:57611:57611 [3] NCCL INFO Launch mode Parallel +r7i4n5:46160:46272 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8887 [0] NCCL INFO comm 0x14db40001060 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46160:46272 [2] NCCL INFO comm 0x14a674001060 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58312:58428 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1768:1935 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58312:58428 [0] NCCL INFO comm 0x14a694001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1768:1935 [2] NCCL INFO comm 0x153830001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58312:58312 [0] NCCL INFO Launch mode Parallel +r7i4n4:1768:1768 [2] NCCL INFO Launch mode Parallel +r7i4n5:46161:46270 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1769:1932 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46161:46270 [3] NCCL INFO comm 0x154314001060 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1769:1932 [3] NCCL INFO comm 0x149e18001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1769:1769 [3] NCCL INFO Launch mode Parallel +r7i5n3:79994:80114 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46158:46275 [0] NCCL INFO Channel 00/02 : 0 1 +r7i4n5:46158:46275 [0] NCCL INFO Channel 01/02 : 0 1 +r7i5n3:79994:80114 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i5n3:79994:80114 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff 
+r7i4n5:46158:46275 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46158:46275 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46158:46275 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29152:29269 [0] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29152:29269 [0] NCCL INFO Channel 01/02 : 0 1 +r7i7n0:55673:55795 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55673:55795 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55673:55795 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29152:29269 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i6n8:29152:29269 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i6n8:29152:29269 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29153:29274 [1] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29153:29274 [1] NCCL INFO Channel 01/02 : 0 1 +r7i4n5:46159:46279 [1] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79995:80118 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55674:55797 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55674:55797 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i6n8:29153:29274 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i6n8:29153:29274 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46159:46279 [1] NCCL INFO Channel 01/02 : 0 1 +r7i6n8:29153:29274 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46159:46279 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46159:46279 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46159:46279 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79995:80118 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i5n3:79995:80118 [1] 
NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55674:55797 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46158:46275 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80114 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n5:40816:40939 [3] NCCL INFO Channel 00/02 : 0 1 +r9i1n5:40816:40939 [3] NCCL INFO Channel 01/02 : 0 1 +r9i1n6:58315:58435 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58315:58435 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n5:46160:46282 [2] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79996:80119 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40816:40939 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40816:40939 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n5:40816:40939 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58315:58435 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29154:29276 [2] NCCL INFO Channel 00/02 : 0 1 +r7i4n5:46160:46282 [2] NCCL INFO Channel 01/02 : 0 1 +r7i4n5:46160:46282 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55675:55798 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46160:46282 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46160:46282 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79996:80119 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i5n3:79996:80119 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29154:29276 [2] NCCL INFO Channel 01/02 : 0 1 +r7i6n8:29154:29276 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i6n8:29154:29276 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i6n8:29154:29276 [2] NCCL INFO Setting affinity for GPU 2 to 
ff,fff00000 +r7i6n8:29155:29278 [3] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29155:29278 [3] NCCL INFO Channel 01/02 : 0 1 +r7i7n0:55675:55798 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55675:55798 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55676:55799 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n0:55676:55799 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n0:55676:55799 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29155:29278 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i6n8:29155:29278 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i6n8:29155:29278 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46161:46284 [3] NCCL INFO Channel 00/02 : 0 1 +r7i5n3:79997:80120 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46161:46284 [3] NCCL INFO Channel 01/02 : 0 1 +r7i5n3:79997:80120 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n5:46161:46284 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79997:80120 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46161:46284 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i4n5:46161:46284 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55673:55795 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29269 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n7:8760:8892 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n7:8760:8892 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r6i4n5:37339:37544 [0] NCCL INFO Channel 00/02 : 0 1 +r6i4n5:37339:37544 [0] NCCL INFO Channel 01/02 : 0 1 +r9i1n7:8760:8892 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff 
+r6i4n5:37339:37544 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r6i4n5:37339:37544 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r6i4n5:37339:37544 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n4:1766:1941 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38598:38720 [0] NCCL INFO Channel 00/02 : 0 1 +r7i3n0:38598:38720 [0] NCCL INFO Channel 01/02 : 0 1 +r7i4n4:1766:1941 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n4:1766:1941 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38598:38720 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38598:38720 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i3n0:38598:38720 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4943:5059 [2] NCCL INFO Channel 00/02 : 0 1 +r7i1n3:4943:5059 [2] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:894:1015 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:894:1015 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i2n6:894:1015 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4943:5059 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40814:40936 [1] NCCL INFO Channel 00/02 : 0 1 +r7i1n3:4943:5059 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i1n3:4943:5059 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58313:58436 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58313:58436 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n4:1767:1942 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38599:38725 [1] NCCL INFO Channel 00/02 : 0 1 +r9i1n4:24819:24942 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57933:58053 [1] NCCL INFO Channel 00/02 : 0 1 +r8i0n3:57933:58053 
[1] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40814:40936 [1] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40814:40936 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40814:40936 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n5:40814:40936 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58313:58436 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1767:1942 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n4:1767:1942 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38599:38725 [1] NCCL INFO Channel 01/02 : 0 1 +r7i3n0:38599:38725 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38599:38725 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i3n0:38599:38725 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24819:24942 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24819:24942 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57933:58053 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40815:40931 [2] NCCL INFO Channel 00/02 : 0 1 +r8i0n3:57933:58053 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r8i0n3:57933:58053 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58314:58437 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58314:58437 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n6:58314:58437 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40815:40931 [2] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40815:40931 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40815:40931 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n5:40815:40931 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40813:40940 [0] NCCL INFO 
Channel 00/02 : 0 1 +r9i1n5:40813:40940 [0] NCCL INFO Channel 01/02 : 0 1 +r9i1n5:40813:40940 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n5:40813:40940 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n5:40813:40940 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58312:58438 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n6:58312:58438 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n6:58312:58438 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68405:68522 [1] NCCL INFO Channel 00/02 : 0 1 +r7i7n1:68405:68522 [1] NCCL INFO Channel 01/02 : 0 1 +r7i7n2:57609:57732 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57609:57732 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i2n6:892:1016 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4941:5062 [0] NCCL INFO Channel 00/02 : 0 1 +r7i4n4:1768:1943 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38600:38718 [2] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57609:57732 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68405:68522 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57608:57731 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68405:68522 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n2:57608:57731 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n1:68405:68522 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57608:57731 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68404:68527 [0] NCCL INFO Channel 00/02 : 0 1 +r7i2n6:892:1016 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i2n6:892:1016 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4941:5062 [0] NCCL INFO Channel 01/02 : 0 1 
+r7i7n1:68404:68527 [0] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:895:1017 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4941:5062 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i5n3:79995:80118 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n1:68404:68527 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4941:5062 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n1:68404:68527 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i1n3:4941:5062 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n4:1768:1943 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n1:68404:68527 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4944:5068 [3] NCCL INFO Channel 00/02 : 0 1 +r7i6n8:29153:29274 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i4n4:1768:1943 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1769:1944 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n4:1769:1944 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i4n4:1769:1944 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38600:38718 [2] NCCL INFO Channel 01/02 : 0 1 +r7i3n0:38600:38718 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i4n5:46159:46279 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i3n0:38600:38718 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i3n0:38600:38718 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38601:38722 [3] NCCL INFO Channel 00/02 : 0 1 +r7i3n0:38601:38722 [3] NCCL INFO Channel 01/02 : 0 1 +r7i7n0:55674:55797 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n2:57610:57733 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 
+r7i3n0:38601:38722 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i3n0:38601:38722 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n1:68406:68524 [2] NCCL INFO Channel 00/02 : 0 1 +r7i3n0:38601:38722 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68406:68524 [2] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:895:1017 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i2n6:895:1017 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:893:1018 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4944:5068 [3] NCCL INFO Channel 01/02 : 0 1 +r7i2n6:893:1018 [1] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i1n3:4944:5068 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i2n6:893:1018 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i1n3:4944:5068 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i1n3:4944:5068 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4942:5065 [1] NCCL INFO Channel 00/02 : 0 1 +r7i1n3:4942:5065 [1] NCCL INFO Channel 01/02 : 0 1 +r7i1n3:4942:5065 [1] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i1n3:4942:5065 [1] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i1n3:4942:5065 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57934:58055 [2] NCCL INFO Channel 00/02 : 0 1 +r9i1n4:24820:24943 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57610:57733 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n1:68406:68524 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n2:57610:57733 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n1:68406:68524 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n1:68406:68524 [2] NCCL INFO Setting 
affinity for GPU 2 to ff,fff00000 +r7i7n2:57611:57734 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68407:68530 [3] NCCL INFO Channel 00/02 : 0 1 +r7i7n2:57611:57734 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r7i7n1:68407:68530 [3] NCCL INFO Channel 01/02 : 0 1 +r7i7n2:57611:57734 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68407:68530 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r7i7n1:68407:68530 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r7i7n1:68407:68530 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46158:46275 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r8i0n3:57934:58055 [2] NCCL INFO Channel 01/02 : 0 1 +r8i0n3:57934:58055 [2] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57934:58055 [2] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r8i0n3:57934:58055 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24820:24943 [2] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r8i0n3:57932:58057 [0] NCCL INFO Channel 00/02 : 0 1 +r8i0n3:57932:58057 [0] NCCL INFO Channel 01/02 : 0 1 +r9i1n4:24820:24943 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57932:58057 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r9i1n4:24818:24944 [0] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57932:58057 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r9i1n4:24818:24944 [0] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r9i1n4:24818:24944 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57932:58057 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24821:24945 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57935:58059 [3] NCCL INFO Channel 00/02 : 0 
1 +r9i1n4:24821:24945 [3] NCCL INFO Trees [0] -1/-1/-1->1->0|0->1->-1/-1/-1 [1] 0/-1/-1->1->-1|-1->1->0/-1/-1 +r8i0n3:57935:58059 [3] NCCL INFO Channel 01/02 : 0 1 +r9i1n4:24821:24945 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57935:58059 [3] NCCL INFO threadThresholds 8/8/64 | 16/8/64 | 8/8/64 +r8i0n3:57935:58059 [3] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] -1/-1/-1->0->1|1->0->-1/-1/-1 +r8i0n3:57935:58059 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79994:80114 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n5:40816:40939 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i5n3:79996:80119 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n5:46160:46282 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55675:55798 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i6n8:29154:29276 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55676:55799 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r9i1n7:8760:8892 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n6:58315:58435 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n0:55673:55795 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r6i4n5:37339:37544 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29269 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i6n8:29155:29278 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i5n3:79997:80120 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i4n4:1766:1941 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n5:46161:46284 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i3n0:38598:38720 [0] NCCL INFO Channel 00 : 1[1a000] 
-> 0[1a000] [receive] via NET/IB/3 +r7i2n6:894:1015 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1767:1942 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i1n3:4943:5059 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n6:58313:58436 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i3n0:38599:38725 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n5:40814:40936 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r8i0n3:57933:58053 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n6:58312:58438 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40940 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i5n3:79995:80118 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i3n0:38600:38718 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n1:68405:68522 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29274 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i2n6:892:1016 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n5:46159:46279 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i4n4:1768:1943 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i2n6:893:1018 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n0:55674:55797 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i1n3:4941:5062 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57732 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i4n4:1769:1944 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via 
NET/IB/2 +r7i7n2:57608:57731 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68527 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58437 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i3n0:38601:38722 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n5:40815:40931 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i2n6:895:1017 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n2:57610:57733 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i7n1:68406:68524 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i1n3:4942:5065 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i7n2:57611:57734 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i1n3:4944:5068 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i7n1:68407:68530 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r8i0n3:57934:58055 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n4:24820:24943 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n5:40816:40939 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r8i0n3:57935:58059 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n4:24818:24944 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80119 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n7:8760:8892 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n5:46160:46282 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r8i0n3:57932:58057 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n4:24821:24945 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 
+r7i7n0:55675:55798 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i6n8:29154:29276 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r6i4n5:37339:37544 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n0:55676:55799 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r9i1n6:58315:58435 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i4n5:46158:46275 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29155:29278 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i4n4:1766:1941 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79994:80114 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38720 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n5:46161:46284 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i5n3:79997:80120 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i2n6:894:1015 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n0:55673:55795 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5059 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i3n0:38599:38725 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i4n4:1767:1942 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r8i0n3:57933:58053 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i6n8:29152:29269 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n6:58313:58436 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r9i1n5:40814:40936 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n5:40813:40940 [0] NCCL INFO Channel 00 : 0[1a000] -> 
1[1a000] [send] via NET/IB/3 +r9i1n6:58312:58438 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n1:68405:68522 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i3n0:38600:38718 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68404:68527 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r9i1n5:40815:40931 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i4n4:1768:1943 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n6:58314:58437 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i2n6:892:1016 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n2:57609:57732 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i4n4:1769:1944 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i2n6:893:1018 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i3n0:38601:38722 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i2n6:895:1017 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i1n3:4941:5062 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n2:57608:57731 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i1n3:4944:5068 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n2:57610:57733 [2] NCCL INFO Channel 00 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n1:68406:68524 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n2:57611:57734 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i1n3:4942:5065 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r8i0n3:57934:58055 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68407:68530 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n4:24820:24943 [2] NCCL INFO Channel 00 : 
1[88000] -> 0[88000] [send] via NET/IB/2 +r8i0n3:57935:58059 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n4:24818:24944 [0] NCCL INFO Channel 00 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79995:80118 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29274 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r8i0n3:57932:58057 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n5:46159:46279 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n4:24821:24945 [3] NCCL INFO Channel 00 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n0:55674:55797 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i4n5:46158:46275 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r9i1n7:8760:8892 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n5:46160:46282 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i5n3:79994:80114 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79996:80119 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i7n0:55675:55798 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n5:40816:40939 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r6i4n5:37339:37544 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29154:29276 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n6:58315:58435 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n0:55676:55799 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n0:55673:55795 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n5:46158:46275 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46158:46275 [0] NCCL INFO comm 0x14ea8c001060 rank 0 nranks 2 
cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1766:1941 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29269 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i5n3:79994:80114 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46158:46158 [0] NCCL INFO Launch mode Parallel +r7i6n8:29155:29278 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i5n3:79997:80120 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i5n3:79994:80114 [0] NCCL INFO comm 0x14ceb0005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46161:46284 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i3n0:38598:38720 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i2n6:894:1015 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1767:1942 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i1n3:4943:5059 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55673:55795 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57933:58053 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i3n0:38599:38725 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i6n8:29152:29269 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55673:55795 [0] NCCL INFO comm 0x148ec0005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29152:29269 [0] NCCL INFO comm 0x1500c8001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46159:46279 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i5n3:79995:80118 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i6n8:29152:29152 [0] NCCL INFO Launch mode 
Parallel +r9i1n6:58313:58436 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i3n0:38600:38718 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n1:68405:68522 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n7:8760:8892 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i6n8:29153:29274 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n5:40813:40940 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58438 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n0:55674:55797 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i3n0:38601:38722 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n5:40814:40936 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r9i1n5:40815:40931 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n6:58314:58437 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1769:1944 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i1n3:4941:5062 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57732 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i7n1:68404:68527 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i4n4:1768:1943 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37339:37544 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n2:57608:57731 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n1:68406:68524 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i5n3:79995:80118 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46159:46279 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer 
+r7i2n6:895:1017 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i7n0:55675:55798 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n4:24818:24944 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i1n3:4944:5068 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r9i1n5:40816:40939 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n1:68407:68530 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i5n3:79995:80118 [1] NCCL INFO comm 0x15059c005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46160:46282 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i4n5:46159:46279 [1] NCCL INFO comm 0x14ddb0001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:892:1016 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i2n6:893:1018 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i1n3:4942:5065 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29274 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80119 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i4n5:46159:46159 [1] NCCL INFO Launch mode Parallel +r7i7n2:57610:57733 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i6n8:29153:29274 [1] NCCL INFO comm 0x154104001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57611:57734 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i6n8:29154:29276 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n4:24821:24945 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i6n8:29153:29153 [1] NCCL INFO Launch mode Parallel +r9i1n7:8760:8892 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58315:58435 [3] NCCL INFO Channel 01 : 
1[8a000] -> 0[8a000] [send] via NET/IB/2 +r8i0n3:57935:58059 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i7n0:55674:55797 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:24943 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r9i1n7:8760:8892 [0] NCCL INFO comm 0x14db40005f50 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57932:58057 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58055 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [receive] via NET/IB/2 +r7i7n0:55674:55797 [1] NCCL INFO comm 0x14db28005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n0:55676:55799 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i4n4:1766:1941 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r6i4n5:37339:37544 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29278 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r6i4n5:37339:37544 [0] NCCL INFO comm 0x147ac8001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46161:46284 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i5n3:79997:80120 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +r7i4n5:46160:46282 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55675:55798 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80119 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38598:38720 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n5:46160:46282 [2] NCCL INFO comm 0x14a654001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE + > loading doc-idx mapping from 
/gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_train_indexmap_1024000ns_1024sl_1234s_doc_idx.npy +r9i1n5:40816:40939 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55675:55798 [2] NCCL INFO comm 0x148d64005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i5n3:79996:80119 [2] NCCL INFO comm 0x14f6ec005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46160:46160 [2] NCCL INFO Launch mode Parallel +r9i1n5:40816:40939 [3] NCCL INFO comm 0x152a04001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29154:29276 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:894:1015 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n5:40816:40816 [3] NCCL INFO Launch mode Parallel +r7i4n4:1767:1942 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r9i1n6:58315:58435 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4943:5059 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r9i1n6:58315:58435 [3] NCCL INFO comm 0x1451c4005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57933:58053 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i7n0:55676:55799 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29276 [2] NCCL INFO comm 0x147c84001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38599:38725 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i6n8:29154:29154 [2] NCCL INFO Launch mode Parallel +r7i7n0:55676:55799 [3] NCCL INFO comm 0x150d08005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1766:1941 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29278 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p 
channels per peer +r7i4n4:1766:1941 [0] NCCL INFO comm 0x14e388005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46161:46284 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79997:80120 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29278 [3] NCCL INFO comm 0x14bc2c001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79997:80120 [3] NCCL INFO comm 0x1512cc005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n5:46161:46284 [3] NCCL INFO comm 0x154318001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29155:29155 [3] NCCL INFO Launch mode Parallel +r7i4n5:46161:46161 [3] NCCL INFO Launch mode Parallel +r7i7n1:68405:68522 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i2n6:894:1015 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38598:38720 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38718 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i2n6:894:1015 [2] NCCL INFO comm 0x152974005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58313:58436 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i1n3:4943:5059 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38598:38720 [0] NCCL INFO comm 0x14eacc001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1767:1942 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58314:58437 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i3n0:38601:38722 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n6:58312:58438 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i3n0:38598:38598 [0] NCCL INFO Launch mode Parallel +r7i4n4:1767:1942 [1] NCCL INFO comm 0x1498e4005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE 
+r7i1n3:4943:5059 [2] NCCL INFO comm 0x153094001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57933:58053 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40815:40931 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68404:68527 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r9i1n5:40813:40940 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4943:4943 [2] NCCL INFO Launch mode Parallel +r8i0n3:57933:58053 [1] NCCL INFO comm 0x15076c001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40814:40936 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24819:24942 [1] NCCL INFO comm 0x14f144005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57933:57933 [1] NCCL INFO Launch mode Parallel +r7i1n3:4941:5062 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n4:1768:1943 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n2:57609:57732 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i7n2:57608:57731 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n4:1769:1944 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i3n0:38599:38725 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24944 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n1:68407:68530 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i3n0:38599:38725 [1] NCCL INFO comm 0x14da18001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i1n3:4944:5068 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i3n0:38599:38599 [1] NCCL INFO Launch mode Parallel +r7i2n6:895:1017 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 
+r7i7n1:68406:68524 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i1n3:4942:5065 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r9i1n4:24821:24945 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n2:57611:57734 [3] NCCL INFO Channel 01 : 1[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n2:57610:57733 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r9i1n6:58314:58437 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38722 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38718 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68405:68522 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:1016 [0] NCCL INFO Channel 01 : 1[1a000] -> 0[1a000] [send] via NET/IB/3 +r9i1n6:58312:58438 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58436 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:893:1018 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1c000] [send] via NET/IB/3 +r9i1n4:24820:24943 [2] NCCL INFO Channel 01 : 1[88000] -> 0[88000] [send] via NET/IB/2 +r8i0n3:57935:58059 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r9i1n6:58314:58437 [2] NCCL INFO comm 0x14c838005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38601:38722 [3] NCCL INFO comm 0x148620001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38600:38718 [2] NCCL INFO comm 0x145220001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68405:68522 [1] NCCL INFO comm 0x14aa28001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58312:58438 [0] NCCL INFO comm 0x14a694005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58313:58436 [1] NCCL INFO comm 0x1508a8005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57934:58055 
[2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n1:68404:68527 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38601 [3] NCCL INFO Launch mode Parallel +r7i3n0:38600:38600 [2] NCCL INFO Launch mode Parallel +r9i1n5:40815:40931 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58057 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i7n1:68405:68405 [1] NCCL INFO Launch mode Parallel +r9i1n5:40813:40940 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40814:40936 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68404:68527 [0] NCCL INFO comm 0x1451f4001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40815:40931 [2] NCCL INFO comm 0x150d9c001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68404:68404 [0] NCCL INFO Launch mode Parallel +r9i1n5:40813:40940 [0] NCCL INFO comm 0x14fff0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40814:40936 [1] NCCL INFO comm 0x145bfc001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1768:1943 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1769:1944 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4944:5068 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40815:40815 [2] NCCL INFO Launch mode Parallel +r9i1n5:40814:40814 [1] NCCL INFO Launch mode Parallel +r9i1n5:40813:40813 [0] NCCL INFO Launch mode Parallel +r7i4n4:1769:1944 [3] NCCL INFO comm 0x149e18005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1768:1943 [2] NCCL INFO comm 0x153830005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:895:1017 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4944:5068 [3] NCCL INFO comm 0x145160001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - 
Init COMPLETE +r7i7n2:57608:57731 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57609:57732 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4941:5062 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4944:4944 [3] NCCL INFO Launch mode Parallel +r7i7n2:57609:57732 [1] NCCL INFO comm 0x14c9e8005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i1n3:4942:5065 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4941:5062 [0] NCCL INFO comm 0x151474001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57608:57731 [0] NCCL INFO comm 0x14fb2c005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68407:68530 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68406:68524 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4941:4941 [0] NCCL INFO Launch mode Parallel +r7i1n3:4942:5065 [1] NCCL INFO comm 0x14e62c001060 rank 0 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:895:1017 [3] NCCL INFO comm 0x149ae4005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i1n3:4942:4942 [1] NCCL INFO Launch mode Parallel +r7i7n1:68407:68530 [3] NCCL INFO comm 0x14d4f4001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68406:68524 [2] NCCL INFO comm 0x153cbc001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68406:68406 [2] NCCL INFO Launch mode Parallel +r7i7n1:68407:68407 [3] NCCL INFO Launch mode Parallel +r9i1n4:24818:24944 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24821:24945 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:1016 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:893:1018 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24944 [0] NCCL INFO comm 0x150ed0005fc0 rank 1 nranks 
2 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24821:24945 [3] NCCL INFO comm 0x150ddc005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24820:24943 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57611:57734 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57610:57733 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:24943 [2] NCCL INFO comm 0x151d78005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n2:57611:57734 [3] NCCL INFO comm 0x1502d8005fc0 rank 1 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57610:57733 [2] NCCL INFO comm 0x14f1c4005fc0 rank 1 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57932:58057 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57935:58059 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57934:58055 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58057 [0] NCCL INFO comm 0x1526b0001060 rank 0 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57932:57932 [0] NCCL INFO Launch mode Parallel +r7i2n6:893:1018 [1] NCCL INFO comm 0x1532f8005fc0 rank 1 nranks 2 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:892:1016 [0] NCCL INFO comm 0x151b34005fc0 rank 1 nranks 2 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57935:58059 [3] NCCL INFO comm 0x151da4001060 rank 0 nranks 2 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57934:58055 [2] NCCL INFO comm 0x15469c001060 rank 0 nranks 2 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57935:57935 [3] NCCL INFO Launch mode Parallel +r8i0n3:57934:57934 [2] NCCL INFO Launch mode Parallel + > loading sample-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_train_indexmap_1024000ns_1024sl_1234s_sample_idx.npy + > loading shuffle-idx mapping from 
/gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_train_indexmap_1024000ns_1024sl_1234s_shuffle_idx.npy + loaded indexed file in 0.022 seconds + total number of samples: 1024856 + total number of epochs: 99 + > loading doc-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_valid_indexmap_112640ns_1024sl_1234s_doc_idx.npy + > loading sample-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_valid_indexmap_112640ns_1024sl_1234s_sample_idx.npy + > loading shuffle-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_valid_indexmap_112640ns_1024sl_1234s_shuffle_idx.npy + loaded indexed file in 0.007 seconds + total number of samples: 113200 + total number of epochs: 182 + > loading doc-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_test_indexmap_10240ns_1024sl_1234s_doc_idx.npy + > loading sample-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_test_indexmap_10240ns_1024sl_1234s_sample_idx.npy + > loading shuffle-idx mapping from /gpfsscratch/rech/eha/commun/datasets-custom/openwebtext-10k/meg-gpt2_text_document_test_indexmap_10240ns_1024sl_1234s_shuffle_idx.npy + loaded indexed file in 0.004 seconds + total number of samples: 10255 + total number of epochs: 672 +> finished creating GPT2 datasets ... 
+r8i0n3:57934:58067 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57935:58066 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24819:24951 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24818:24952 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24820:24953 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24821:24954 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n4:24818:24952 [0] NCCL INFO Trees [0] 49/40/56->48->32|32->48->49/40/56 [1] 49/-1/-1->48->51|51->48->49/-1/-1 [2] 49/-1/-1->48->44|44->48->49/-1/-1 [3] 49/-1/-1->48->51|51->48->49/-1/-1 +r9i1n4:24820:24953 [2] NCCL INFO Trees [0] 51/-1/-1->50->49|49->50->51/-1/-1 [1] 51/42/58->50->34|34->50->51/42/58 [2] 51/-1/-1->50->49|49->50->51/-1/-1 [3] 51/-1/-1->50->46|46->50->51/-1/-1 +r9i1n5:40815:40948 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40813:40946 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40814:40945 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40816:40947 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n5:40815:40948 [2] NCCL INFO Trees [0] 55/-1/-1->54->53|53->54->55/-1/-1 [1] 55/-1/-1->54->58|58->54->55/-1/-1 [2] 55/-1/-1->54->53|53->54->55/-1/-1 [3] 55/46/62->54->38|38->54->55/46/62 +r9i1n6:58315:58445 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58314:58444 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58313:58443 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58315:58445 [3] NCCL INFO Trees [0] -1/-1/-1->59->58|58->59->-1/-1/-1 [1] 56/-1/-1->59->58|58->59->56/-1/-1 [2] -1/-1/-1->59->58|58->59->-1/-1/-1 [3] 56/-1/-1->59->58|58->59->56/-1/-1 +r9i1n6:58314:58444 [2] NCCL INFO Trees [0] 59/-1/-1->58->57|57->58->59/-1/-1 [1] 59/54/62->58->50|50->58->59/54/62 [2] 59/-1/-1->58->57|57->58->59/-1/-1 [3] 59/-1/-1->58->62|62->58->59/-1/-1 +r9i1n6:58312:58446 [0] 
NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n6:58313:58443 [1] NCCL INFO Trees [0] 58/-1/-1->57->56|56->57->58/-1/-1 [1] -1/-1/-1->57->56|56->57->-1/-1/-1 [2] 58/-1/-1->57->56|56->57->58/-1/-1 [3] -1/-1/-1->57->56|56->57->-1/-1/-1 +r8i0n3:57935:58066 [3] NCCL INFO Trees [0] -1/-1/-1->47->46|46->47->-1/-1/-1 [1] 44/-1/-1->47->46|46->47->44/-1/-1 [2] -1/-1/-1->47->46|46->47->-1/-1/-1 [3] 44/-1/-1->47->46|46->47->44/-1/-1 +r8i0n3:57934:58067 [2] NCCL INFO Trees [0] 47/-1/-1->46->45|45->46->47/-1/-1 [1] 47/-1/-1->46->42|42->46->47/-1/-1 [2] 47/-1/-1->46->45|45->46->47/-1/-1 [3] 47/42/50->46->54|54->46->47/42/50 +r8i0n3:57935:58066 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57934:58067 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57933:58064 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57611:57741 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57933:58064 [1] NCCL INFO Trees [0] 46/-1/-1->45->44|44->45->46/-1/-1 [1] -1/-1/-1->45->44|44->45->-1/-1/-1 [2] 46/-1/-1->45->44|44->45->46/-1/-1 [3] -1/-1/-1->45->44|44->45->-1/-1/-1 +r9i1n4:24819:24951 [1] NCCL INFO Trees [0] 50/-1/-1->49->48|48->49->50/-1/-1 [1] -1/-1/-1->49->48|48->49->-1/-1/-1 [2] 50/-1/-1->49->48|48->49->50/-1/-1 [3] -1/-1/-1->49->48|48->49->-1/-1/-1 +r9i1n4:24821:24954 [3] NCCL INFO Trees [0] -1/-1/-1->51->50|50->51->-1/-1/-1 [1] 48/-1/-1->51->50|50->51->48/-1/-1 [2] -1/-1/-1->51->50|50->51->-1/-1/-1 [3] 48/-1/-1->51->50|50->51->48/-1/-1 +r9i1n4:24818:24952 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24820:24953 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24819:24951 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24821:24954 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57933:58064 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57932:58065 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r8i0n3:57932:58065 [0] NCCL INFO Trees 
[0] 45/-1/-1->44->40|40->44->45/-1/-1 [1] 45/-1/-1->44->47|47->44->45/-1/-1 [2] 45/40/48->44->52|52->44->45/40/48 [3] 45/-1/-1->44->47|47->44->45/-1/-1 +r8i0n3:57932:58065 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40813:40946 [0] NCCL INFO Trees [0] 53/-1/-1->52->56|56->52->53/-1/-1 [1] 53/-1/-1->52->55|55->52->53/-1/-1 [2] 53/44/60->52->36|36->52->53/44/60 [3] 53/-1/-1->52->55|55->52->53/-1/-1 +r9i1n5:40814:40945 [1] NCCL INFO Trees [0] 54/-1/-1->53->52|52->53->54/-1/-1 [1] -1/-1/-1->53->52|52->53->-1/-1/-1 [2] 54/-1/-1->53->52|52->53->54/-1/-1 [3] -1/-1/-1->53->52|52->53->-1/-1/-1 +r9i1n5:40815:40948 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40816:40947 [3] NCCL INFO Trees [0] -1/-1/-1->55->54|54->55->-1/-1/-1 [1] 52/-1/-1->55->54|54->55->52/-1/-1 [2] -1/-1/-1->55->54|54->55->-1/-1/-1 [3] 52/-1/-1->55->54|54->55->52/-1/-1 +r9i1n5:40813:40946 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40814:40945 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n5:40816:40947 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58315:58445 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58314:58444 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58312:58446 [0] NCCL INFO Trees [0] 57/52/60->56->48|48->56->57/52/60 [1] 57/-1/-1->56->59|59->56->57/-1/-1 [2] 57/-1/-1->56->60|60->56->57/-1/-1 [3] 57/-1/-1->56->59|59->56->57/-1/-1 +r9i1n6:58313:58443 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58312:58446 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68407:68535 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68407:68535 [3] NCCL INFO Trees [0] -1/-1/-1->39->38|38->39->-1/-1/-1 [1] 36/-1/-1->39->38|38->39->36/-1/-1 [2] -1/-1/-1->39->38|38->39->-1/-1/-1 [3] 36/-1/-1->39->38|38->39->36/-1/-1 +r7i7n0:55674:55805 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55674:55805 [1] NCCL INFO Trees [0] 
34/-1/-1->33->32|32->33->34/-1/-1 [1] -1/-1/-1->33->32|32->33->-1/-1/-1 [2] 34/-1/-1->33->32|32->33->34/-1/-1 [3] -1/-1/-1->33->32|32->33->-1/-1/-1 +r7i7n2:57611:57741 [3] NCCL INFO Trees [0] -1/-1/-1->43->42|42->43->-1/-1/-1 [1] 40/-1/-1->43->42|42->43->40/-1/-1 [2] -1/-1/-1->43->42|42->43->-1/-1/-1 [3] 40/-1/-1->43->42|42->43->40/-1/-1 +r7i7n2:57610:57740 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57611:57741 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57610:57740 [2] NCCL INFO Trees [0] 43/-1/-1->42->41|41->42->43/-1/-1 [1] 43/38/46->42->50|50->42->43/38/46 [2] 43/-1/-1->42->41|41->42->43/-1/-1 [3] 43/-1/-1->42->46|46->42->43/-1/-1 +r7i7n2:57610:57740 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57609:57739 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29153:29283 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29154:29284 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29155:29286 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29153:29283 [1] NCCL INFO Trees [0] 30/-1/-1->29->28|28->29->30/-1/-1 [1] -1/-1/-1->29->28|28->29->-1/-1/-1 [2] 30/-1/-1->29->28|28->29->30/-1/-1 [3] -1/-1/-1->29->28|28->29->-1/-1/-1 +r7i6n8:29154:29284 [2] NCCL INFO Trees [0] 31/-1/-1->30->29|29->30->31/-1/-1 [1] 31/-1/-1->30->26|26->30->31/-1/-1 [2] 31/-1/-1->30->29|29->30->31/-1/-1 [3] 31/26/34->30->22|22->30->31/26/34 +r7i5n3:79997:80127 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79997:80127 [3] NCCL INFO Trees [0] -1/-1/-1->27->26|26->27->-1/-1/-1 [1] 24/-1/-1->27->26|26->27->24/-1/-1 [2] -1/-1/-1->27->26|26->27->-1/-1/-1 [3] 24/-1/-1->27->26|26->27->24/-1/-1 +r7i5n3:79997:80127 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57609:57739 [1] NCCL INFO Trees [0] 42/-1/-1->41->40|40->41->42/-1/-1 [1] -1/-1/-1->41->40|40->41->-1/-1/-1 [2] 42/-1/-1->41->40|40->41->42/-1/-1 [3] -1/-1/-1->41->40|40->41->-1/-1/-1 
+r7i7n2:57609:57739 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57608:57742 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46160:46290 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46161:46292 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46158:46289 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46159:46291 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n5:46160:46290 [2] NCCL INFO Trees [0] 23/-1/-1->22->21|21->22->23/-1/-1 [1] 23/-1/-1->22->26|26->22->23/-1/-1 [2] 23/-1/-1->22->21|21->22->23/-1/-1 [3] 23/14/30->22->38|38->22->23/14/30 +r7i4n5:46161:46292 [3] NCCL INFO Trees [0] -1/-1/-1->23->22|22->23->-1/-1/-1 [1] 20/-1/-1->23->22|22->23->20/-1/-1 [2] -1/-1/-1->23->22|22->23->-1/-1/-1 [3] 20/-1/-1->23->22|22->23->20/-1/-1 +r7i4n5:46158:46289 [0] NCCL INFO Trees [0] 21/-1/-1->20->24|24->20->21/-1/-1 [1] 21/-1/-1->20->23|23->20->21/-1/-1 [2] 21/12/28->20->36|36->20->21/12/28 [3] 21/-1/-1->20->23|23->20->21/-1/-1 +r7i7n2:57608:57742 [0] NCCL INFO Trees [0] 41/36/44->40->48|48->40->41/36/44 [1] 41/-1/-1->40->43|43->40->41/-1/-1 [2] 41/-1/-1->40->44|44->40->41/-1/-1 [3] 41/-1/-1->40->43|43->40->41/-1/-1 +r7i3n0:38601:38732 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n2:57608:57742 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8763:8895 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8761:8896 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8760:8897 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8762:8894 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r9i1n7:8763:8895 [3] NCCL INFO Trees [0] -1/-1/-1->63->62|62->63->-1/-1/-1 [1] 60/-1/-1->63->62|62->63->60/-1/-1 [2] -1/-1/-1->63->62|62->63->-1/-1/-1 [3] 60/-1/-1->63->62|62->63->60/-1/-1 +r9i1n7:8761:8896 [1] NCCL INFO Trees [0] 62/-1/-1->61->60|60->61->62/-1/-1 [1] -1/-1/-1->61->60|60->61->-1/-1/-1 [2] 
62/-1/-1->61->60|60->61->62/-1/-1 [3] -1/-1/-1->61->60|60->61->-1/-1/-1 +r7i2n6:894:1024 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:895:1026 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4942:5074 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4943:5075 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1769:1949 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1767:1950 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1769:1949 [3] NCCL INFO Trees [0] -1/-1/-1->19->18|18->19->-1/-1/-1 [1] 16/-1/-1->19->18|18->19->16/-1/-1 [2] -1/-1/-1->19->18|18->19->-1/-1/-1 [3] 16/-1/-1->19->18|18->19->16/-1/-1 +r7i4n4:1768:1951 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1769:1949 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1767:1950 [1] NCCL INFO Trees [0] 18/-1/-1->17->16|16->17->18/-1/-1 [1] -1/-1/-1->17->16|16->17->-1/-1/-1 [2] 18/-1/-1->17->16|16->17->18/-1/-1 [3] -1/-1/-1->17->16|16->17->-1/-1/-1 +r7i4n4:1768:1951 [2] NCCL INFO Trees [0] 19/-1/-1->18->17|17->18->19/-1/-1 [1] 19/10/26->18->34|34->18->19/10/26 [2] 19/-1/-1->18->17|17->18->19/-1/-1 [3] 19/-1/-1->18->14|14->18->19/-1/-1 +r6i4n5:37339:37557 [0] NCCL INFO Channel 00/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r6i4n5:37340:37560 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37342:37559 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37339:37557 [0] NCCL INFO Channel 01/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r6i4n5:37341:37558 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68407:68535 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68405:68536 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68405:68536 [1] NCCL INFO Trees [0] 38/-1/-1->37->36|36->37->38/-1/-1 [1] -1/-1/-1->37->36|36->37->-1/-1/-1 [2] 38/-1/-1->37->36|36->37->38/-1/-1 [3] 
-1/-1/-1->37->36|36->37->-1/-1/-1 +r7i7n1:68404:68538 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68406:68537 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n1:68405:68536 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68404:68538 [0] NCCL INFO Trees [0] 37/-1/-1->36->40|40->36->37/-1/-1 [1] 37/-1/-1->36->39|39->36->37/-1/-1 [2] 37/20/52->36->4|4->36->37/20/52 [3] 37/-1/-1->36->39|39->36->37/-1/-1 +r7i7n1:68404:68538 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68406:68537 [2] NCCL INFO Trees [0] 39/-1/-1->38->37|37->38->39/-1/-1 [1] 39/-1/-1->38->42|42->38->39/-1/-1 [2] 39/-1/-1->38->37|37->38->39/-1/-1 [3] 39/22/54->38->6|6->38->39/22/54 +r7i7n1:68406:68537 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55676:55807 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55674:55805 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55675:55806 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55676:55807 [3] NCCL INFO Trees [0] -1/-1/-1->35->34|34->35->-1/-1/-1 [1] 32/-1/-1->35->34|34->35->32/-1/-1 [2] -1/-1/-1->35->34|34->35->-1/-1/-1 [3] 32/-1/-1->35->34|34->35->32/-1/-1 +r7i7n0:55675:55806 [2] NCCL INFO Trees [0] 35/-1/-1->34->33|33->34->35/-1/-1 [1] 35/18/50->34->2|2->34->35/18/50 [2] 35/-1/-1->34->33|33->34->35/-1/-1 [3] 35/-1/-1->34->30|30->34->35/-1/-1 +r7i7n0:55676:55807 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55675:55806 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55673:55804 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i7n0:55673:55804 [0] NCCL INFO Trees [0] 33/16/48->32->0|0->32->33/16/48 [1] 33/-1/-1->32->35|35->32->33/-1/-1 [2] 33/-1/-1->32->28|28->32->33/-1/-1 [3] 33/-1/-1->32->35|35->32->33/-1/-1 +r7i7n0:55673:55804 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29152:29285 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i6n8:29155:29286 [3] NCCL INFO 
Trees [0] -1/-1/-1->31->30|30->31->-1/-1/-1 [1] 28/-1/-1->31->30|30->31->28/-1/-1 [2] -1/-1/-1->31->30|30->31->-1/-1/-1 [3] 28/-1/-1->31->30|30->31->28/-1/-1 +r7i6n8:29154:29284 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29153:29283 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29152:29285 [0] NCCL INFO Trees [0] 29/-1/-1->28->24|24->28->29/-1/-1 [1] 29/-1/-1->28->31|31->28->29/-1/-1 [2] 29/24/32->28->20|20->28->29/24/32 [3] 29/-1/-1->28->31|31->28->29/-1/-1 +r7i6n8:29155:29286 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29152:29285 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79994:80125 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79995:80126 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79996:80128 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i5n3:79994:80125 [0] NCCL INFO Trees [0] 25/20/28->24->16|16->24->25/20/28 [1] 25/-1/-1->24->27|27->24->25/-1/-1 [2] 25/-1/-1->24->28|28->24->25/-1/-1 [3] 25/-1/-1->24->27|27->24->25/-1/-1 +r7i5n3:79995:80126 [1] NCCL INFO Trees [0] 26/-1/-1->25->24|24->25->26/-1/-1 [1] -1/-1/-1->25->24|24->25->-1/-1/-1 [2] 26/-1/-1->25->24|24->25->26/-1/-1 [3] -1/-1/-1->25->24|24->25->-1/-1/-1 +r7i5n3:79996:80128 [2] NCCL INFO Trees [0] 27/-1/-1->26->25|25->26->27/-1/-1 [1] 27/22/30->26->18|18->26->27/22/30 [2] 27/-1/-1->26->25|25->26->27/-1/-1 [3] 27/-1/-1->26->30|30->26->27/-1/-1 +r7i5n3:79994:80125 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79995:80126 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79996:80128 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46159:46291 [1] NCCL INFO Trees [0] 22/-1/-1->21->20|20->21->22/-1/-1 [1] -1/-1/-1->21->20|20->21->-1/-1/-1 [2] 22/-1/-1->21->20|20->21->22/-1/-1 [3] -1/-1/-1->21->20|20->21->-1/-1/-1 +r7i4n5:46160:46290 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46161:46292 [3] NCCL INFO Setting affinity for GPU 
3 to ff,fff00000 +r7i4n5:46158:46289 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46159:46291 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38598:38730 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38601:38732 [3] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 12/-1/-1->15->14|14->15->12/-1/-1 [2] -1/-1/-1->15->14|14->15->-1/-1/-1 [3] 12/-1/-1->15->14|14->15->12/-1/-1 +r7i3n0:38599:38731 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38600:38733 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i3n0:38601:38732 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38598:38730 [0] NCCL INFO Trees [0] 13/-1/-1->12->8|8->12->13/-1/-1 [1] 13/-1/-1->12->15|15->12->13/-1/-1 [2] 13/8/16->12->20|20->12->13/8/16 [3] 13/-1/-1->12->15|15->12->13/-1/-1 +r7i3n0:38599:38731 [1] NCCL INFO Trees [0] 14/-1/-1->13->12|12->13->14/-1/-1 [1] -1/-1/-1->13->12|12->13->-1/-1/-1 [2] 14/-1/-1->13->12|12->13->14/-1/-1 [3] -1/-1/-1->13->12|12->13->-1/-1/-1 +r7i3n0:38598:38730 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38600:38733 [2] NCCL INFO Trees [0] 15/-1/-1->14->13|13->14->15/-1/-1 [1] 15/-1/-1->14->10|10->14->15/-1/-1 [2] 15/-1/-1->14->13|13->14->15/-1/-1 [3] 15/10/18->14->22|22->14->15/10/18 +r7i3n0:38599:38731 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38600:38733 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:894:1024 [2] NCCL INFO Trees [0] 11/-1/-1->10->9|9->10->11/-1/-1 [1] 11/6/14->10->18|18->10->11/6/14 [2] 11/-1/-1->10->9|9->10->11/-1/-1 [3] 11/-1/-1->10->14|14->10->11/-1/-1 +r7i2n6:895:1026 [3] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 8/-1/-1->11->10|10->11->8/-1/-1 [2] -1/-1/-1->11->10|10->11->-1/-1/-1 [3] 8/-1/-1->11->10|10->11->8/-1/-1 +r7i2n6:894:1024 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:895:1026 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:893:1023 [1] NCCL INFO 
threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:892:1025 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i2n6:893:1023 [1] NCCL INFO Trees [0] 10/-1/-1->9->8|8->9->10/-1/-1 [1] -1/-1/-1->9->8|8->9->-1/-1/-1 [2] 10/-1/-1->9->8|8->9->10/-1/-1 [3] -1/-1/-1->9->8|8->9->-1/-1/-1 +r7i2n6:892:1025 [0] NCCL INFO Trees [0] 9/4/12->8->16|16->8->9/4/12 [1] 9/-1/-1->8->11|11->8->9/-1/-1 [2] 9/-1/-1->8->12|12->8->9/-1/-1 [3] 9/-1/-1->8->11|11->8->9/-1/-1 +r7i2n6:893:1023 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:892:1025 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5074 [1] NCCL INFO Trees [0] 6/-1/-1->5->4|4->5->6/-1/-1 [1] -1/-1/-1->5->4|4->5->-1/-1/-1 [2] 6/-1/-1->5->4|4->5->6/-1/-1 [3] -1/-1/-1->5->4|4->5->-1/-1/-1 +r7i1n3:4943:5075 [2] NCCL INFO Trees [0] 7/-1/-1->6->5|5->6->7/-1/-1 [1] 7/-1/-1->6->10|10->6->7/-1/-1 [2] 7/-1/-1->6->5|5->6->7/-1/-1 [3] 7/38/-1->6->-1|-1->6->7/38/-1 +r7i1n3:4941:5076 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4944:5073 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i1n3:4942:5074 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i1n3:4943:5075 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4941:5076 [0] NCCL INFO Trees [0] 5/-1/-1->4->8|8->4->5/-1/-1 [1] 5/-1/-1->4->7|7->4->5/-1/-1 [2] 5/36/-1->4->-1|-1->4->5/36/-1 [3] 5/-1/-1->4->7|7->4->5/-1/-1 +r7i1n3:4944:5073 [3] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 4/-1/-1->7->6|6->7->4/-1/-1 [2] -1/-1/-1->7->6|6->7->-1/-1/-1 [3] 4/-1/-1->7->6|6->7->4/-1/-1 +r7i1n3:4941:5076 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4944:5073 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8760:8897 [0] NCCL INFO Trees [0] 61/-1/-1->60->56|56->60->61/-1/-1 [1] 61/-1/-1->60->63|63->60->61/-1/-1 [2] 61/56/0->60->52|52->60->61/56/0 [3] 61/-1/-1->60->63|63->60->61/-1/-1 +r9i1n7:8762:8894 [2] NCCL INFO Trees [0] 63/-1/-1->62->61|61->62->63/-1/-1 [1] 
63/-1/-1->62->58|58->62->63/-1/-1 [2] 63/-1/-1->62->61|61->62->63/-1/-1 [3] 63/58/2->62->54|54->62->63/58/2 +r9i1n7:8763:8895 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8761:8896 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8760:8897 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8762:8894 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1952 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r7i4n4:1767:1950 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1768:1951 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1952 [0] NCCL INFO Trees [0] 17/8/24->16->32|32->16->17/8/24 [1] 17/-1/-1->16->19|19->16->17/-1/-1 [2] 17/-1/-1->16->12|12->16->17/-1/-1 [3] 17/-1/-1->16->19|19->16->17/-1/-1 +r7i4n4:1766:1952 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37339:37557 [0] NCCL INFO Channel 02/04 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +r6i4n5:37340:37560 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] -1/-1/-1->1->0|0->1->-1/-1/-1 [2] 2/-1/-1->1->0|0->1->2/-1/-1 [3] -1/-1/-1->1->0|0->1->-1/-1/-1 +r6i4n5:37342:37559 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->2|2->3->0/-1/-1 [2] -1/-1/-1->3->2|2->3->-1/-1/-1 [3] 0/-1/-1->3->2|2->3->0/-1/-1 +r6i4n5:37340:37560 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37342:37559 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37339:37557 [0] NCCL INFO Channel 03/04 : 0 3 2 1 4 7 6 5 8 11 10 9 12 15 14 13 16 19 18 17 +r6i4n5:37341:37558 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] 3/34/-1->2->-1|-1->2->3/34/-1 [2] 3/-1/-1->2->1|1->2->3/-1/-1 [3] 3/-1/-1->2->62|62->2->3/-1/-1 +r6i4n5:37341:37558 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37339:37557 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 8/8/64 +r6i4n5:37339:37557 [0] NCCL INFO Trees [0] 1/32/-1->0->-1|-1->0->1/32/-1 [1] 1/-1/-1->0->3|3->0->1/-1/-1 [2] 
1/-1/-1->0->60|60->0->1/-1/-1 [3] 1/-1/-1->0->3|3->0->1/-1/-1 +r6i4n5:37339:37557 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68405:68536 [1] NCCL INFO Channel 00 : 37[1c000] -> 38[88000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[88000] via P2P/IPC +r9i1n6:58313:58443 [1] NCCL INFO Channel 00 : 57[1c000] -> 58[88000] via P2P/IPC +r9i1n5:40814:40945 [1] NCCL INFO Channel 00 : 53[1c000] -> 54[88000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 00 : 46[88000] -> 47[8a000] via P2P/IPC +r9i1n4:24819:24951 [1] NCCL INFO Channel 00 : 49[1c000] -> 50[88000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO Channel 00 : 41[1c000] -> 42[88000] via P2P/IPC +r9i1n6:58314:58444 [2] NCCL INFO Channel 00 : 58[88000] -> 59[8a000] via P2P/IPC +r9i1n5:40815:40948 [2] NCCL INFO Channel 00 : 54[88000] -> 55[8a000] via P2P/IPC +r9i1n4:24820:24953 [2] NCCL INFO Channel 00 : 50[88000] -> 51[8a000] via P2P/IPC +r8i0n3:57933:58064 [1] NCCL INFO Channel 00 : 45[1c000] -> 46[88000] via P2P/IPC +r7i7n0:55674:55805 [1] NCCL INFO Channel 00 : 33[1c000] -> 34[88000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 00 : 29[1c000] -> 30[88000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 00 : 25[1c000] -> 26[88000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[88000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 00 : 21[1c000] -> 22[88000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO Channel 00 : 42[88000] -> 43[8a000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[88000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO Channel 00 : 17[1c000] -> 18[88000] via P2P/IPC +r9i1n7:8761:8896 [1] NCCL INFO Channel 00 : 61[1c000] -> 62[88000] via P2P/IPC +r6i4n5:37340:37560 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55806 [2] NCCL INFO Channel 00 : 34[88000] -> 35[8a000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 00 : 30[88000] -> 31[8a000] via 
P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO Channel 00 : 26[88000] -> 27[8a000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 00 : 14[88000] -> 15[8a000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 00 : 22[88000] -> 23[8a000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 00 : 10[88000] -> 11[8a000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 00 : 6[88000] -> 7[8a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 00 : 18[88000] -> 19[8a000] via P2P/IPC +r9i1n7:8762:8894 [2] NCCL INFO Channel 00 : 62[88000] -> 63[8a000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58444 [2] NCCL INFO Channel 00 : 58[88000] -> 57[1c000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 00 : 46[88000] -> 45[1c000] via P2P/IPC +r9i1n5:40815:40948 [2] NCCL INFO Channel 00 : 54[88000] -> 53[1c000] via P2P/IPC +r9i1n4:24820:24953 [2] NCCL INFO Channel 00 : 50[88000] -> 49[1c000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO Channel 00 : 42[88000] -> 41[1c000] via P2P/IPC +r7i7n0:55675:55806 [2] NCCL INFO Channel 00 : 34[88000] -> 33[1c000] via P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO Channel 00 : 26[88000] -> 25[1c000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 00 : 14[88000] -> 13[1c000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 00 : 30[88000] -> 29[1c000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 00 : 22[88000] -> 21[1c000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 00 : 6[88000] -> 5[1c000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 00 : 18[88000] -> 17[1c000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 00 : 10[88000] -> 9[1c000] via P2P/IPC +r9i1n7:8762:8894 [2] NCCL INFO Channel 00 : 62[88000] -> 61[1c000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO 
Channel 00 : 43[8a000] -> 44[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 00 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5076 [0] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n7:8760:8897 [0] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 00 : 36[1a000] -> 37[1c000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 00 : 44[1a000] -> 45[1c000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 00 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 00 : 28[1a000] -> 29[1c000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1c000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1c000] via P2P/IPC 
+r7i4n5:46158:46289 [0] NCCL INFO Channel 00 : 20[1a000] -> 21[1c000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 00 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 40[1a000] -> 41[1c000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40947 [3] NCCL INFO Channel 00 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r8i0n3:57935:58066 [3] NCCL INFO Channel 00 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1c000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 00 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r9i1n7:8760:8897 [0] NCCL INFO Channel 00 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 32[1a000] -> 33[1c000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 16[1a000] -> 17[1c000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 00 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 +r7i6n8:29155:29286 [3] NCCL INFO Channel 00 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r7i7n2:57611:57741 [3] NCCL INFO Channel 00 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r7i4n5:46161:46292 [3] NCCL INFO Channel 00 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r6i4n5:37342:37559 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[1a000] [send] via NET/IB/2 +r7i3n0:38601:38732 [3] NCCL INFO Channel 00 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r8i0n3:57933:58064 [1] NCCL INFO Channel 00 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r7i5n3:79997:80127 [3] NCCL INFO Channel 00 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r9i1n5:40814:40945 [1] NCCL INFO Channel 00 : 53[1c000] -> 52[1a000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 00 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r7i4n4:1769:1949 [3] NCCL 
INFO Channel 00 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r9i1n4:24819:24951 [1] NCCL INFO Channel 00 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 00 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 00 : 21[1c000] -> 20[1a000] via P2P/IPC +r9i1n6:58313:58443 [1] NCCL INFO Channel 00 : 57[1c000] -> 56[1a000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 00 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO Channel 00 : 41[1c000] -> 40[1a000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 00 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i7n0:55674:55805 [1] NCCL INFO Channel 00 : 33[1c000] -> 32[1a000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 00 : 5[1c000] -> 4[1a000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 00 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO Channel 00 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r9i1n5:40816:40947 [3] NCCL INFO Channel 00 : 55[8a000] -> 54[88000] via P2P/IPC +r8i0n3:57935:58066 [3] NCCL INFO Channel 00 : 47[8a000] -> 46[88000] via P2P/IPC +r6i4n5:37340:37560 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 00 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n8:29155:29286 [3] NCCL INFO Channel 00 : 31[8a000] -> 30[88000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO Channel 00 : 43[8a000] -> 42[88000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 00 : 23[8a000] -> 22[88000] via P2P/IPC +r7i3n0:38601:38732 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[88000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 00 : 59[8a000] -> 58[88000] via P2P/IPC +r9i1n7:8761:8896 [1] NCCL INFO Channel 00 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[88000] via P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO Channel 00 : 27[8a000] -> 
26[88000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 00 : 35[8a000] -> 34[88000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 00 : 63[8a000] -> 62[88000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 00 : 19[8a000] -> 18[88000] via P2P/IPC +r6i4n5:37342:37559 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 42[88000] -> 41[1c000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO Channel 01 : 43[8a000] -> 42[88000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[88000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 01 : 14[88000] -> 13[1c000] via P2P/IPC +r9i1n5:40815:40948 [2] NCCL INFO Channel 01 : 54[88000] -> 53[1c000] via P2P/IPC +r8i0n3:57935:58066 [3] NCCL INFO Channel 01 : 47[8a000] -> 46[88000] via P2P/IPC +r9i1n5:40816:40947 [3] NCCL INFO Channel 01 : 55[8a000] -> 54[88000] via P2P/IPC +r7i3n0:38601:38732 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[88000] via P2P/IPC +r7i6n8:29155:29286 [3] NCCL INFO Channel 01 : 31[8a000] -> 30[88000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 01 : 46[88000] -> 45[1c000] via P2P/IPC +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 50[88000] -> 49[1c000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 01 : 22[88000] -> 21[1c000] via P2P/IPC +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 58[88000] -> 57[1c000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 01 : 51[8a000] -> 50[88000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 01 : 30[88000] -> 29[1c000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 01 : 23[8a000] -> 22[88000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 01 : 59[8a000] -> 58[88000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[88000] via P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO Channel 01 : 27[8a000] -> 26[88000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 01 : 6[88000] -> 5[1c000] via P2P/IPC +r7i7n0:55675:55806 [2] 
NCCL INFO Channel 01 : 34[88000] -> 33[1c000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 18[88000] -> 17[1c000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 10[88000] -> 9[1c000] via P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 26[88000] -> 25[1c000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 01 : 35[8a000] -> 34[88000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 01 : 19[8a000] -> 18[88000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[88000] via P2P/IPC +r9i1n7:8762:8894 [2] NCCL INFO Channel 01 : 62[88000] -> 61[1c000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 01 : 63[8a000] -> 62[88000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO Channel 01 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37559 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO Channel 00 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 00 : 38[88000] -> 39[8a000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [send] via NET/IB/3 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 52[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5076 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r9i1n7:8760:8897 [0] NCCL INFO Channel 00 : 60[1a000] -> 
56[1a000] [send] via NET/IB/3 +r6i4n5:37339:37557 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 00 : 20[1a000] -> 24[1a000] [send] via NET/IB/3 +r7i7n1:68407:68535 [3] NCCL INFO Channel 00 : 39[8a000] -> 38[88000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r7i7n1:68405:68536 [1] NCCL INFO Channel 00 : 37[1c000] -> 36[1a000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r9i1n6:58313:58443 [1] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r9i1n5:40814:40945 [1] NCCL INFO Channel 01 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r9i1n4:24819:24951 [1] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [send] via NET/IB/3 +r7i5n3:79995:80126 [1] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r7i1n3:4942:5074 [1] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] [send] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 00 : 38[88000] -> 37[1c000] via P2P/IPC +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [receive] via NET/IB/2 +r7i4n4:1767:1950 [1] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 54[88000] -> 58[88000] [receive] via NET/IB/2 +r7i7n0:55674:55805 [1] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [send] via NET/IB/2 +r7i3n0:38599:38731 [1] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r6i4n5:37340:37560 [1] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r7i6n8:29153:29283 [1] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r7i1n3:4943:5075 
[2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [send] via NET/IB/2 +r9i1n7:8761:8896 [1] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [receive] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 6[88000] -> 10[88000] [receive] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [receive] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [receive] via NET/IB/2 +r8i0n3:57933:58064 [1] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r7i3n0:38600:38733 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r7i4n5:46160:46290 [2] NCCL INFO Channel 01 : 22[88000] -> 26[88000] [send] via NET/IB/2 +r7i6n8:29154:29284 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r7i4n5:46159:46291 [1] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r7i7n1:68407:68535 [3] NCCL INFO Channel 01 : 39[8a000] -> 38[88000] via P2P/IPC +r9i1n7:8762:8894 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r6i4n5:37341:37558 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [receive] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 01 : 38[88000] -> 37[1c000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 56[1a000] -> 48[1a000] [receive] via NET/IB/3 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 36[1a000] -> 40[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 60[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO 
Channel 01 : 38[88000] -> 42[88000] [receive] via NET/IB/2 +r7i7n2:57609:57739 [1] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [receive] via NET/IB/3 +r7i1n3:4941:5076 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [receive] via NET/IB/2 +r7i4n5:46158:46289 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [receive] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [receive] via NET/IB/2 +r9i1n5:40815:40948 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [receive] via NET/IB/2 +r7i1n3:4943:5075 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [receive] via NET/IB/2 +r9i1n5:40815:40948 [2] NCCL INFO Channel 01 : 54[88000] -> 55[8a000] via P2P/IPC +r7i7n1:68405:68536 [1] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r7i4n5:46160:46290 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [receive] via NET/IB/2 +r7i1n3:4943:5075 [2] NCCL INFO Channel 01 : 6[88000] -> 7[8a000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 01 : 22[88000] -> 23[8a000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 01 : 38[88000] -> 42[88000] [send] via NET/IB/2 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 8[1a000] -> 16[1a000] [send] via NET/IB/3 +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 48[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 
56[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 24[1a000] -> 16[1a000] [send] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 32[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 16[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r7i3n0:38598:38730 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n7:8760:8897 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 00 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 58[88000] -> 50[88000] [send] via NET/IB/2 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 50[88000] -> 34[88000] [send] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 10[88000] -> 18[88000] [send] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 26[88000] -> 18[88000] [send] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 34[88000] -> 2[88000] [send] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 18[88000] -> 34[88000] [send] via NET/IB/2 +r9i1n7:8762:8894 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r7i3n0:38600:38733 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r7i6n8:29154:29284 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] [receive] via NET/IB/2 +r9i1n7:8762:8894 [2] NCCL INFO Channel 01 : 62[88000] -> 63[8a000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 01 : 14[88000] -> 15[8a000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 01 : 30[88000] -> 31[8a000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO Channel 01 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [receive] via NET/IB/2 
+r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 40[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i7n1:68406:68537 [2] NCCL INFO Channel 01 : 38[88000] -> 39[8a000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 42[88000] -> 50[88000] [send] via NET/IB/2 +r8i0n3:57932:58065 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 01 : 42[88000] -> 46[88000] [receive] via NET/IB/2 +r6i4n5:37339:37557 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [send] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 01 : 46[88000] -> 47[8a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 0[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [receive] via NET/IB/3 +r6i4n5:37341:37558 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [send] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [receive] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 50[88000] -> 58[88000] [receive] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [receive] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 2[88000] -> 34[88000] [receive] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 58[88000] -> 59[8a000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 10[88000] -> 11[8a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [receive] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 26[88000] -> 27[8a000] via P2P/IPC +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 34[88000] -> 35[8a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 
18[88000] -> 19[8a000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 32[1a000] -> 48[1a000] [receive] via NET/IB/3 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [send] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [receive] via NET/IB/2 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 00 : 24[1a000] -> 20[1a000] [send] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 50[88000] -> 51[8a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 32[1a000] -> 16[1a000] [send] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 56[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [receive] via NET/IB/2 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 16[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 42[88000] -> 43[8a000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 01 : 61[1c000] -> 0[1a000] [receive] via NET/IB/3 +r6i4n5:37339:37557 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 10[88000] -> 6[88000] [send] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 58[88000] -> 54[88000] [send] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 26[88000] -> 22[88000] [send] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 34[88000] -> 18[88000] [send] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 18[88000] -> 10[88000] [send] via NET/IB/2 +r9i1n7:8761:8896 [1] NCCL INFO Channel 01 : 61[1c000] -> 60[1a000] via P2P/IPC +r6i4n5:37342:37559 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 48[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i2n6:892:1025 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO 
Channel 00 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r6i4n5:37341:37558 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 00 : 32[1a000] -> 48[1a000] [send] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 40[1a000] -> 36[1a000] [send] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 50[88000] -> 42[88000] [send] via NET/IB/2 +r9i1n6:58312:58446 [0] NCCL INFO Channel 00 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 00 : 16[1a000] -> 24[1a000] [send] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 42[88000] -> 38[88000] [send] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 01 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 01 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 01 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 01 : 34[88000] -> 50[88000] [send] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 01 : 18[88000] -> 26[88000] [send] via NET/IB/2 +r9i1n4:24818:24952 [0] NCCL INFO Channel 00 : 48[1a000] -> 56[1a000] [send] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 00 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 01 : 50[88000] -> 58[88000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 01 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 01 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 01 : 4[1a000] -> 7[8a000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 01 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 01 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 01 : 12[1a000] -> 15[8a000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 01 : 49[1c000] -> 52[1a000] [receive] via 
NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 01 : 20[1a000] -> 23[8a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 01 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 01 : 52[1a000] -> 55[8a000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 01 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r7i1n3:4944:5073 [3] NCCL INFO Channel 01 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 01 : 8[1a000] -> 11[8a000] via P2P/IPC +r6i4n5:37340:37560 [1] NCCL INFO Channel 01 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 01 : 28[1a000] -> 31[8a000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 01 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 01 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 +r7i3n0:38601:38732 [3] NCCL INFO Channel 01 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 01 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r7i4n5:46161:46292 [3] NCCL INFO Channel 01 : 23[8a000] -> 20[1a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 01 : 32[1a000] -> 35[8a000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 01 : 60[1a000] -> 63[8a000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO Channel 01 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 01 : 24[1a000] -> 27[8a000] via P2P/IPC +r9i1n5:40816:40947 [3] NCCL INFO Channel 01 : 55[8a000] -> 52[1a000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 01 : 5[1c000] -> 4[1a000] via P2P/IPC +r9i1n4:24819:24951 [1] NCCL INFO Channel 01 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 01 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 01 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 +r7i6n8:29155:29286 [3] NCCL INFO Channel 01 : 31[8a000] -> 28[1a000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 01 : 16[1a000] -> 19[8a000] 
via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 01 : 11[8a000] -> 8[1a000] via P2P/IPC +r9i1n6:58313:58443 [1] NCCL INFO Channel 01 : 57[1c000] -> 56[1a000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 01 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 01 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 01 : 35[8a000] -> 32[1a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 01 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i1n3:4941:5076 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 01 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO Channel 01 : 27[8a000] -> 24[1a000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 01 : 63[8a000] -> 60[1a000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 01 : 19[8a000] -> 16[1a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1c000] via P2P/IPC +r7i4n5:46158:46289 [0] NCCL INFO Channel 01 : 20[1a000] -> 21[1c000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 01 : 28[1a000] -> 29[1c000] via P2P/IPC +r6i4n5:37340:37560 [1] NCCL INFO Channel 02 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 02 : 6[88000] -> 7[8a000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 01 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i7n1:68404:68538 [0] NCCL INFO Channel 01 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r7i1n3:4942:5074 [1] NCCL INFO Channel 02 : 5[1c000] -> 6[88000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 01 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 01 : 16[1a000] -> 17[1c000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 01 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i7n1:68404:68538 [0] NCCL 
INFO Channel 01 : 36[1a000] -> 39[8a000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 01 : 56[1a000] -> 59[8a000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 02 : 9[1c000] -> 10[88000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 01 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 +r7i2n6:894:1024 [2] NCCL INFO Channel 02 : 10[88000] -> 11[8a000] via P2P/IPC +r7i7n0:55675:55806 [2] NCCL INFO Channel 02 : 34[88000] -> 35[8a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 02 : 29[1c000] -> 30[88000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 01 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r6i4n5:37341:37558 [2] NCCL INFO Channel 02 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 01 : 48[1a000] -> 51[8a000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 02 : 6[88000] -> 5[1c000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 02 : 22[88000] -> 23[8a000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 02 : 30[88000] -> 31[8a000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 01 : 44[1a000] -> 47[8a000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 02 : 14[88000] -> 15[8a000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 02 : 21[1c000] -> 22[88000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 01 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r7i7n1:68407:68535 [3] NCCL INFO Channel 01 : 39[8a000] -> 36[1a000] via P2P/IPC +r7i7n0:55674:55805 [1] NCCL INFO Channel 01 : 33[1c000] -> 32[1a000] via P2P/IPC +r9i1n5:40814:40945 [1] NCCL INFO Channel 01 : 53[1c000] -> 52[1a000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 02 : 25[1c000] -> 26[88000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 01 : 40[1a000] -> 43[8a000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 02 : 13[1c000] -> 14[88000] via P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO Channel 02 : 26[88000] -> 27[8a000] via P2P/IPC +r7i2n6:894:1024 [2] NCCL INFO Channel 
02 : 10[88000] -> 9[1c000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 01 : 59[8a000] -> 56[1a000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO Channel 02 : 17[1c000] -> 18[88000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 02 : 18[88000] -> 19[8a000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 02 : 30[88000] -> 29[1c000] via P2P/IPC +r9i1n7:8761:8896 [1] NCCL INFO Channel 02 : 61[1c000] -> 62[88000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 02 : 22[88000] -> 21[1c000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 01 : 51[8a000] -> 48[1a000] via P2P/IPC +r8i0n3:57933:58064 [1] NCCL INFO Channel 01 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 02 : 14[88000] -> 13[1c000] via P2P/IPC +r7i7n1:68405:68536 [1] NCCL INFO Channel 01 : 37[1c000] -> 36[1a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 01 : 32[1a000] -> 33[1c000] via P2P/IPC +r8i0n3:57935:58066 [3] NCCL INFO Channel 01 : 47[8a000] -> 44[1a000] via P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO Channel 02 : 26[88000] -> 25[1c000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 01 : 52[1a000] -> 53[1c000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO Channel 01 : 41[1c000] -> 40[1a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 02 : 18[88000] -> 17[1c000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO Channel 01 : 43[8a000] -> 40[1a000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 01 : 56[1a000] -> 57[1c000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 01 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i7n1:68404:68538 [0] NCCL INFO Channel 01 : 36[1a000] -> 37[1c000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 01 : 44[1a000] -> 45[1c000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n0:55674:55805 [1] NCCL INFO Channel 02 : 33[1c000] -> 34[88000] via P2P/IPC +r6i4n5:37342:37559 [3] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] 
[send] via NET/IB/2 +r9i1n5:40815:40948 [2] NCCL INFO Channel 02 : 54[88000] -> 55[8a000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 02 : 3[8a000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n5:40814:40945 [1] NCCL INFO Channel 02 : 53[1c000] -> 54[88000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 01 : 40[1a000] -> 41[1c000] via P2P/IPC +r9i1n7:8762:8894 [2] NCCL INFO Channel 02 : 62[88000] -> 63[8a000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 02 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5076 [0] NCCL INFO Channel 02 : 4[1a000] -> 5[1c000] via P2P/IPC +r9i1n6:58313:58443 [1] NCCL INFO Channel 02 : 57[1c000] -> 58[88000] via P2P/IPC +r9i1n6:58314:58444 [2] NCCL INFO Channel 02 : 58[88000] -> 59[8a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 02 : 7[8a000] -> 8[1a000] [receive] via NET/IB/3 +r7i7n0:55675:55806 [2] NCCL INFO Channel 02 : 34[88000] -> 33[1c000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [send] via NET/IB/2 +r7i2n6:892:1025 [0] NCCL INFO Channel 02 : 8[1a000] -> 9[1c000] via P2P/IPC +r7i6n8:29155:29286 [3] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [send] via NET/IB/2 +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [receive] via NET/IB/3 +r8i0n3:57933:58064 [1] NCCL INFO Channel 02 : 45[1c000] -> 46[88000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 02 : 54[88000] -> 53[1c000] via P2P/IPC +r9i1n4:24819:24951 [1] NCCL INFO Channel 02 : 49[1c000] -> 50[88000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 02 : 38[88000] -> 39[8a000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 02 : 23[8a000] -> 24[1a000] [send] via NET/IB/2 +r9i1n4:24820:24953 [2] NCCL INFO Channel 02 : 50[88000] -> 51[8a000] via P2P/IPC +r7i7n1:68405:68536 [1] NCCL INFO Channel 02 : 
37[1c000] -> 38[88000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 02 : 46[88000] -> 47[8a000] via P2P/IPC +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 20[1a000] -> 21[1c000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 02 : 5[1c000] -> 4[1a000] via P2P/IPC +r9i1n6:58314:58444 [2] NCCL INFO Channel 02 : 58[88000] -> 57[1c000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 02 : 23[8a000] -> 24[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57739 [1] NCCL INFO Channel 02 : 41[1c000] -> 42[88000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 11[8a000] -> 12[1a000] [receive] via NET/IB/3 +r7i5n3:79997:80127 [3] NCCL INFO Channel 02 : 27[8a000] -> 28[1a000] [send] via NET/IB/2 +r7i7n2:57610:57740 [2] NCCL INFO Channel 02 : 42[88000] -> 43[8a000] via P2P/IPC +r7i3n0:38601:38732 [3] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [send] via NET/IB/2 +r7i2n6:893:1023 [1] NCCL INFO Channel 02 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 02 : 15[8a000] -> 16[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 02 : 62[88000] -> 61[1c000] via P2P/IPC +r7i5n3:79994:80125 [0] NCCL INFO Channel 02 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 12[1a000] -> 13[1c000] via P2P/IPC +r6i4n5:37340:37560 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 02 : 46[88000] -> 45[1c000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 02 : 38[88000] -> 37[1c000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 02 : 19[8a000] -> 20[1a000] [send] via NET/IB/2 +r9i1n4:24820:24953 [2] NCCL INFO Channel 02 : 50[88000] -> 49[1c000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 02 : 7[8a000] -> 6[88000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [receive] via NET/IB/3 +r6i4n5:37342:37559 [3] NCCL INFO 
Channel 02 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 02 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 02 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 02 : 16[1a000] -> 17[1c000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 02 : 63[8a000] -> 0[1a000] [send] via NET/IB/2 +r7i7n2:57610:57740 [2] NCCL INFO Channel 02 : 42[88000] -> 41[1c000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 60[1a000] -> 61[1c000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 02 : 23[8a000] -> 22[88000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 02 : 25[1c000] -> 24[1a000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 02 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 02 : 11[8a000] -> 10[88000] via P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO Channel 02 : 27[8a000] -> 26[88000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO Channel 02 : 17[1c000] -> 16[1a000] via P2P/IPC +r7i3n0:38601:38732 [3] NCCL INFO Channel 02 : 15[8a000] -> 14[88000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 03 : 6[88000] -> 5[1c000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 02 : 19[8a000] -> 18[88000] via P2P/IPC +r7i1n3:4944:5073 [3] NCCL INFO Channel 03 : 7[8a000] -> 6[88000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 02 : 31[8a000] -> 32[1a000] [receive] via NET/IB/3 +r7i2n6:894:1024 [2] NCCL INFO Channel 03 : 10[88000] -> 9[1c000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [send] via NET/IB/2 +r7i2n6:895:1026 [3] NCCL INFO Channel 03 : 11[8a000] -> 10[88000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 03 : 23[8a000] -> 22[88000] via P2P/IPC +r6i4n5:37342:37559 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 02 : 32[1a000] -> 33[1c000] via 
P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 02 : 63[8a000] -> 62[88000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 22[88000] -> 21[1c000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n5:40816:40947 [3] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [send] via NET/IB/2 +r9i1n7:8761:8896 [1] NCCL INFO Channel 02 : 61[1c000] -> 60[1a000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 52[1a000] -> 53[1c000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 02 : 55[8a000] -> 56[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80128 [2] NCCL INFO Channel 03 : 26[88000] -> 25[1c000] via P2P/IPC +r8i0n3:57935:58066 [3] NCCL INFO Channel 02 : 47[8a000] -> 48[1a000] [send] via NET/IB/2 +r9i1n6:58315:58445 [3] NCCL INFO Channel 02 : 59[8a000] -> 60[1a000] [send] via NET/IB/2 +r7i5n3:79997:80127 [3] NCCL INFO Channel 03 : 27[8a000] -> 26[88000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 14[88000] -> 13[1c000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58446 [0] NCCL INFO Channel 02 : 56[1a000] -> 57[1c000] via P2P/IPC +r7i3n0:38601:38732 [3] NCCL INFO Channel 03 : 15[8a000] -> 14[88000] via P2P/IPC +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 35[8a000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n4:24821:24954 [3] NCCL INFO Channel 02 : 51[8a000] -> 52[1a000] [send] via NET/IB/2 +r7i6n8:29155:29286 [3] NCCL INFO Channel 02 : 31[8a000] -> 30[88000] via P2P/IPC +r7i7n0:55674:55805 [1] NCCL INFO Channel 02 : 33[1c000] -> 32[1a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 03 : 18[88000] -> 17[1c000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 44[1a000] -> 45[1c000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 03 : 19[8a000] -> 18[88000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [send] via NET/IB/2 +r9i1n4:24818:24952 [0] NCCL 
INFO Channel 02 : 47[8a000] -> 48[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 36[1a000] -> 37[1c000] via P2P/IPC +r9i1n4:24818:24952 [0] NCCL INFO Channel 02 : 48[1a000] -> 49[1c000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 02 : 39[8a000] -> 40[1a000] [receive] via NET/IB/3 +r9i1n5:40814:40945 [1] NCCL INFO Channel 02 : 53[1c000] -> 52[1a000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO Channel 02 : 43[8a000] -> 44[1a000] [send] via NET/IB/2 +r7i6n8:29155:29286 [3] NCCL INFO Channel 03 : 31[8a000] -> 30[88000] via P2P/IPC +r9i1n5:40816:40947 [3] NCCL INFO Channel 02 : 55[8a000] -> 54[88000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 02 : 40[1a000] -> 41[1c000] via P2P/IPC +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 30[88000] -> 29[1c000] via P2P/IPC +r8i0n3:57933:58064 [1] NCCL INFO Channel 02 : 45[1c000] -> 44[1a000] via P2P/IPC +r9i1n6:58313:58443 [1] NCCL INFO Channel 02 : 57[1c000] -> 56[1a000] via P2P/IPC +r9i1n7:8763:8895 [3] NCCL INFO Channel 03 : 63[8a000] -> 62[88000] via P2P/IPC +r7i7n1:68405:68536 [1] NCCL INFO Channel 02 : 37[1c000] -> 36[1a000] via P2P/IPC +r7i1n3:4941:5076 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 62[88000] -> 61[1c000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 02 : 35[8a000] -> 34[88000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 02 : 59[8a000] -> 58[88000] via P2P/IPC +r8i0n3:57935:58066 [3] NCCL INFO Channel 02 : 47[8a000] -> 46[88000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [send] via NET/IB/3 +r9i1n4:24821:24954 [3] NCCL INFO Channel 02 : 51[8a000] -> 50[88000] via P2P/IPC +r9i1n4:24819:24951 [1] NCCL INFO Channel 02 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO Channel 02 : 41[1c000] -> 40[1a000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO Channel 02 : 39[8a000] -> 38[88000] via P2P/IPC +r7i7n2:57611:57741 
[3] NCCL INFO Channel 02 : 43[8a000] -> 42[88000] via P2P/IPC +r7i7n0:55675:55806 [2] NCCL INFO Channel 03 : 34[88000] -> 33[1c000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 03 : 35[8a000] -> 34[88000] via P2P/IPC +r9i1n5:40816:40947 [3] NCCL INFO Channel 03 : 55[8a000] -> 54[88000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [receive] via NET/IB/3 +r7i2n6:893:1023 [1] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [send] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 54[88000] -> 53[1c000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [send] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 02 : 24[1a000] -> 28[1a000] [send] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 46[88000] -> 45[1c000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 8[1a000] -> 12[1a000] [receive] via NET/IB/3 +r9i1n6:58314:58444 [2] NCCL INFO Channel 03 : 58[88000] -> 57[1c000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [send] via NET/IB/3 +r8i0n3:57935:58066 [3] NCCL INFO Channel 03 : 47[8a000] -> 46[88000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [receive] via NET/IB/2 +r7i4n4:1766:1952 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [receive] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 03 : 50[88000] -> 49[1c000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO Channel 03 : 59[8a000] -> 58[88000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 38[88000] -> 37[1c000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 03 : 51[8a000] -> 50[88000] via P2P/IPC +r6i4n5:37339:37557 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i2n6:894:1024 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [send] via NET/IB/2 +r7i7n2:57610:57740 [2] NCCL INFO Channel 03 : 
42[88000] -> 41[1c000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO Channel 03 : 39[8a000] -> 38[88000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO Channel 03 : 43[8a000] -> 42[88000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [send] via NET/IB/3 +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [receive] via NET/IB/2 +r7i5n3:79995:80126 [1] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [send] via NET/IB/3 +r7i4n4:1767:1950 [1] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [send] via NET/IB/3 +r7i4n5:46159:46291 [1] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [send] via NET/IB/3 +r6i4n5:37340:37560 [1] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [send] via NET/IB/3 +r7i5n3:79996:80128 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [send] via NET/IB/2 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 10[88000] -> 14[88000] [receive] via NET/IB/2 +r6i4n5:37341:37558 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [send] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] [send] via NET/IB/2 +r7i7n0:55673:55804 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i7n0:55674:55805 [1] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [send] via NET/IB/3 +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 26[88000] -> 30[88000] [receive] via NET/IB/2 +r9i1n6:58312:58446 [0] NCCL INFO Channel 02 : 56[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n4:24818:24952 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 32[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n7:8761:8896 [1] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [send] via 
NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [receive] via NET/IB/2 +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 02 : 40[1a000] -> 44[1a000] [send] via NET/IB/3 +r7i7n0:55675:55806 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [send] via NET/IB/2 +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 16[1a000] -> 12[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 46[88000] -> 54[88000] [receive] via NET/IB/2 +r9i1n6:58313:58443 [1] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [send] via NET/IB/3 +r9i1n5:40814:40945 [1] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [send] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i7n1:68405:68536 [1] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [send] via NET/IB/3 +r9i1n6:58314:58444 [2] NCCL INFO Channel 03 : 58[88000] -> 62[88000] [send] via NET/IB/2 +r9i1n4:24819:24951 [1] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [send] via NET/IB/3 +r8i0n3:57933:58064 [1] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [send] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [receive] via NET/IB/2 +r7i2n6:892:1025 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] [send] via NET/IB/2 +r7i5n3:79994:80125 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [receive] via NET/IB/3 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [receive] via NET/IB/2 +r7i7n2:57609:57739 [1] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [send] via NET/IB/3 +r7i7n2:57610:57740 [2] NCCL INFO Channel 03 : 42[88000] -> 46[88000] [send] via NET/IB/2 +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [receive] via NET/IB/2 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 18[88000] -> 14[88000] 
[receive] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [receive] via NET/IB/2 +r7i2n6:894:1024 [2] NCCL INFO Channel 03 : 10[88000] -> 11[8a000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 0[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 34[88000] -> 30[88000] [receive] via NET/IB/2 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [receive] via NET/IB/3 +r7i5n3:79996:80128 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [receive] via NET/IB/2 +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 28[1a000] -> 20[1a000] [send] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 2[88000] -> 62[88000] [receive] via NET/IB/2 +r7i5n3:79996:80128 [2] NCCL INFO Channel 03 : 26[88000] -> 27[8a000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 48[1a000] -> 44[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 12[1a000] -> 20[1a000] [send] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [receive] via NET/IB/2 +r9i1n6:58312:58446 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [receive] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [receive] via NET/IB/3 +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 20[1a000] -> 36[1a000] [send] via NET/IB/3 +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 50[88000] -> 46[88000] [receive] via NET/IB/2 +r7i7n2:57608:57742 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [receive] via NET/IB/3 +r7i4n4:1766:1952 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [receive] via NET/IB/3 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [receive] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [receive] via NET/IB/2 +r9i1n6:58314:58444 [2] NCCL INFO Channel 03 : 58[88000] -> 
59[8a000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 22[88000] -> 38[88000] [send] via NET/IB/2 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 14[88000] -> 22[88000] [send] via NET/IB/2 +r7i7n2:57610:57740 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [receive] via NET/IB/2 +r7i7n2:57610:57740 [2] NCCL INFO Channel 03 : 42[88000] -> 43[8a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO Channel 03 : 14[88000] -> 18[88000] [receive] via NET/IB/2 +r7i4n4:1768:1951 [2] NCCL INFO Channel 03 : 18[88000] -> 19[8a000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 60[1a000] -> 52[1a000] [send] via NET/IB/3 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 30[88000] -> 22[88000] [send] via NET/IB/2 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 36[1a000] -> 4[1a000] [send] via NET/IB/3 +r7i7n0:55675:55806 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [receive] via NET/IB/2 +r7i7n0:55675:55806 [2] NCCL INFO Channel 03 : 34[88000] -> 35[8a000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 52[1a000] -> 36[1a000] [send] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 44[1a000] -> 52[1a000] [send] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 62[88000] -> 54[88000] [send] via NET/IB/2 +r6i4n5:37339:37557 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [receive] via NET/IB/3 +r6i4n5:37341:37558 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [receive] via NET/IB/2 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 54[88000] -> 38[88000] [send] via NET/IB/2 +r9i1n4:24818:24952 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [receive] via NET/IB/3 +r6i4n5:37341:37558 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 46[88000] -> 54[88000] [send] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 38[88000] -> 6[88000] [send] via NET/IB/2 +r9i1n4:24820:24953 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [receive] via NET/IB/2 +r7i6n8:29152:29285 
[0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [receive] via NET/IB/3 +r9i1n4:24820:24953 [2] NCCL INFO Channel 03 : 50[88000] -> 51[8a000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5075 [2] NCCL INFO Channel 03 : 6[88000] -> 7[8a000] via P2P/IPC +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [receive] via NET/IB/3 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [receive] via NET/IB/2 +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [receive] via NET/IB/2 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 14[88000] -> 15[8a000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 22[88000] -> 23[8a000] via P2P/IPC +r7i1n3:4941:5076 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [send] via NET/IB/3 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [receive] via NET/IB/2 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 30[88000] -> 31[8a000] via P2P/IPC +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [receive] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 28[1a000] -> 24[1a000] [send] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [receive] via NET/IB/2 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 4[1a000] -> 36[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 12[1a000] -> 8[1a000] [send] via NET/IB/3 +r7i1n3:4943:5075 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [send] via NET/IB/2 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 62[88000] -> 63[8a000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [receive] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 38[88000] -> 54[88000] [receive] via NET/IB/2 +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] 
[receive] via NET/IB/2 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 54[88000] -> 55[8a000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 46[88000] -> 47[8a000] via P2P/IPC +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 6[88000] -> 38[88000] [receive] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 38[88000] -> 39[8a000] via P2P/IPC +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 20[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 14[88000] -> 10[88000] [send] via NET/IB/2 +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 22[88000] -> 14[88000] [send] via NET/IB/2 +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 30[88000] -> 26[88000] [send] via NET/IB/2 +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 60[1a000] -> 56[1a000] [send] via NET/IB/3 +r7i6n8:29152:29285 [0] NCCL INFO Channel 02 : 28[1a000] -> 32[1a000] [send] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 36[1a000] -> 20[1a000] [send] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 44[1a000] -> 40[1a000] [send] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 02 : 12[1a000] -> 16[1a000] [send] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 62[88000] -> 58[88000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 03 : 1[1c000] -> 4[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 52[1a000] -> 44[1a000] [send] via NET/IB/3 +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 54[88000] -> 46[88000] [send] via NET/IB/2 +r7i1n3:4941:5076 [0] NCCL INFO Channel 03 : 4[1a000] -> 7[8a000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 46[88000] -> 42[88000] [send] via NET/IB/2 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 38[88000] -> 22[88000] [send] via NET/IB/2 +r7i4n5:46158:46289 [0] NCCL INFO Channel 02 : 20[1a000] -> 28[1a000] [send] via NET/IB/3 +r7i3n0:38600:38733 [2] NCCL INFO Channel 03 : 14[88000] -> 18[88000] [send] via NET/IB/2 +r7i1n3:4944:5073 [3] NCCL INFO 
Channel 03 : 7[8a000] -> 4[1a000] via P2P/IPC +r7i4n5:46160:46290 [2] NCCL INFO Channel 03 : 22[88000] -> 30[88000] [send] via NET/IB/2 +r6i4n5:37340:37560 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5075 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i1n3:4943:5075 [2] NCCL INFO comm 0x153094005fc0 rank 6 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29154:29284 [2] NCCL INFO Channel 03 : 30[88000] -> 34[88000] [send] via NET/IB/2 +r9i1n7:8760:8897 [0] NCCL INFO Channel 02 : 60[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 02 : 36[1a000] -> 52[1a000] [send] via NET/IB/3 +r8i0n3:57932:58065 [0] NCCL INFO Channel 02 : 44[1a000] -> 48[1a000] [send] via NET/IB/3 +r9i1n7:8762:8894 [2] NCCL INFO Channel 03 : 62[88000] -> 2[88000] [send] via NET/IB/2 +r7i5n3:79994:80125 [0] NCCL INFO Channel 03 : 21[1c000] -> 24[1a000] [receive] via NET/IB/3 +r7i5n3:79994:80125 [0] NCCL INFO Channel 03 : 24[1a000] -> 27[8a000] via P2P/IPC +r9i1n5:40815:40948 [2] NCCL INFO Channel 03 : 54[88000] -> 62[88000] [send] via NET/IB/2 +r8i0n3:57934:58067 [2] NCCL INFO Channel 03 : 46[88000] -> 50[88000] [send] via NET/IB/2 +r7i2n6:892:1025 [0] NCCL INFO Channel 03 : 5[1c000] -> 8[1a000] [receive] via NET/IB/3 +r9i1n5:40813:40946 [0] NCCL INFO Channel 02 : 52[1a000] -> 60[1a000] [send] via NET/IB/3 +r7i7n0:55673:55804 [0] NCCL INFO Channel 03 : 29[1c000] -> 32[1a000] [receive] via NET/IB/3 +r7i7n1:68406:68537 [2] NCCL INFO Channel 03 : 38[88000] -> 54[88000] [send] via NET/IB/2 +r7i2n6:892:1025 [0] NCCL INFO Channel 03 : 8[1a000] -> 11[8a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO Channel 03 : 32[1a000] -> 35[8a000] via P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO Channel 03 : 27[8a000] -> 24[1a000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO Channel 03 : 21[1c000] -> 20[1a000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO Channel 03 : 13[1c000] -> 16[1a000] [receive] via NET/IB/3 
+r7i4n4:1766:1952 [0] NCCL INFO Channel 03 : 16[1a000] -> 19[8a000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO Channel 03 : 5[1c000] -> 4[1a000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO Channel 03 : 11[8a000] -> 8[1a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO Channel 03 : 29[1c000] -> 28[1a000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO Channel 03 : 35[8a000] -> 32[1a000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO Channel 03 : 9[1c000] -> 12[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38730 [0] NCCL INFO Channel 03 : 12[1a000] -> 15[8a000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO Channel 03 : 19[8a000] -> 16[1a000] via P2P/IPC +r7i3n0:38599:38731 [1] NCCL INFO Channel 03 : 13[1c000] -> 12[1a000] via P2P/IPC +r7i1n3:4941:5076 [0] NCCL INFO Channel 03 : 4[1a000] -> 5[1c000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO Channel 03 : 25[1c000] -> 28[1a000] [receive] via NET/IB/3 +r7i1n3:4944:5073 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i1n3:4944:5073 [3] NCCL INFO comm 0x145160005fc0 rank 7 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29285 [0] NCCL INFO Channel 03 : 28[1a000] -> 31[8a000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO Channel 03 : 9[1c000] -> 8[1a000] via P2P/IPC +r7i1n3:4942:5074 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i1n3:4942:5074 [1] NCCL INFO comm 0x14e62c005fc0 rank 5 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i1n3:4941:5076 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i1n3:4941:5076 [0] NCCL INFO comm 0x151474005fc0 rank 4 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38601:38732 [3] NCCL INFO Channel 03 : 15[8a000] -> 12[1a000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO Channel 03 : 25[1c000] -> 24[1a000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 03 : 53[1c000] -> 56[1a000] [receive] via NET/IB/3 +r7i6n8:29155:29286 [3] NCCL INFO Channel 03 : 31[8a000] -> 28[1a000] via P2P/IPC 
+r7i2n6:892:1025 [0] NCCL INFO Channel 03 : 8[1a000] -> 9[1c000] via P2P/IPC +r9i1n6:58312:58446 [0] NCCL INFO Channel 03 : 56[1a000] -> 59[8a000] via P2P/IPC +r7i2n6:895:1026 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i2n6:895:1026 [3] NCCL INFO comm 0x149ae415f010 rank 11 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:894:1024 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46158:46289 [0] NCCL INFO Channel 03 : 17[1c000] -> 20[1a000] [receive] via NET/IB/3 +r7i2n6:893:1023 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37557 [0] NCCL INFO Channel 03 : 61[1c000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57742 [0] NCCL INFO Channel 03 : 37[1c000] -> 40[1a000] [receive] via NET/IB/3 +r7i2n6:894:1024 [2] NCCL INFO comm 0x15297415e010 rank 10 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46158:46289 [0] NCCL INFO Channel 03 : 20[1a000] -> 23[8a000] via P2P/IPC +r7i7n2:57608:57742 [0] NCCL INFO Channel 03 : 40[1a000] -> 43[8a000] via P2P/IPC +r7i2n6:892:1025 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38598:38730 [0] NCCL INFO Channel 03 : 12[1a000] -> 13[1c000] via P2P/IPC +r7i5n3:79996:80128 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i2n6:892:1025 [0] NCCL INFO comm 0x151b3415e010 rank 8 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38601:38732 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79994:80125 [0] NCCL INFO Channel 03 : 24[1a000] -> 25[1c000] via P2P/IPC +r7i2n6:893:1023 [1] NCCL INFO comm 0x1532f815e010 rank 9 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1768:1951 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80128 [2] NCCL INFO comm 0x14f6ec15e010 rank 26 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37339:37557 [0] NCCL INFO Channel 03 : 0[1a000] -> 3[8a000] via 
P2P/IPC +r7i5n3:79997:80127 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58315:58445 [3] NCCL INFO Channel 03 : 59[8a000] -> 56[1a000] via P2P/IPC +r9i1n5:40814:40945 [1] NCCL INFO Channel 03 : 53[1c000] -> 52[1a000] via P2P/IPC +r7i4n4:1768:1951 [2] NCCL INFO comm 0x15383015e010 rank 18 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38599:38731 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i5n3:79997:80127 [3] NCCL INFO comm 0x1512cc15e010 rank 27 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24818:24952 [0] NCCL INFO Channel 03 : 45[1c000] -> 48[1a000] [receive] via NET/IB/3 +r7i7n1:68404:68538 [0] NCCL INFO Channel 03 : 33[1c000] -> 36[1a000] [receive] via NET/IB/3 +r7i3n0:38599:38731 [1] NCCL INFO comm 0x14da18005fc0 rank 13 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i5n3:79995:80126 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29285 [0] NCCL INFO Channel 03 : 28[1a000] -> 29[1c000] via P2P/IPC +r7i3n0:38598:38730 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38732 [3] NCCL INFO comm 0x148620005fc0 rank 15 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n0:55675:55806 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24952 [0] NCCL INFO Channel 03 : 48[1a000] -> 51[8a000] via P2P/IPC +r7i5n3:79995:80126 [1] NCCL INFO comm 0x15059c15e010 rank 25 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29155:29286 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38598:38730 [0] NCCL INFO comm 0x14eacc005f50 rank 12 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i5n3:79994:80125 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58065 [0] NCCL INFO Channel 03 : 41[1c000] -> 44[1a000] [receive] via NET/IB/3 +r7i4n4:1767:1950 [1] NCCL INFO Channel 03 : 17[1c000] -> 16[1a000] via P2P/IPC 
+r7i7n0:55675:55806 [2] NCCL INFO comm 0x148d6415e010 rank 34 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29155:29286 [3] NCCL INFO comm 0x14bc2c005fc0 rank 31 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79994:80125 [0] NCCL INFO comm 0x14ceb015e010 rank 24 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57611:57741 [3] NCCL INFO Channel 03 : 43[8a000] -> 40[1a000] via P2P/IPC +r7i7n1:68404:68538 [0] NCCL INFO Channel 03 : 36[1a000] -> 39[8a000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO Channel 03 : 23[8a000] -> 20[1a000] via P2P/IPC +r8i0n3:57932:58065 [0] NCCL INFO Channel 03 : 44[1a000] -> 47[8a000] via P2P/IPC +r7i6n8:29153:29283 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68405:68536 [1] NCCL INFO Channel 03 : 37[1c000] -> 36[1a000] via P2P/IPC +r9i1n7:8761:8896 [1] NCCL INFO Channel 03 : 61[1c000] -> 60[1a000] via P2P/IPC +r7i6n8:29152:29285 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29153:29283 [1] NCCL INFO comm 0x154104005fc0 rank 29 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29152:29285 [0] NCCL INFO comm 0x1500c8005f50 rank 28 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37342:37559 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55805 [1] NCCL INFO Channel 03 : 33[1c000] -> 32[1a000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO Channel 03 : 51[8a000] -> 48[1a000] via P2P/IPC +r7i3n0:38600:38733 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n4:1766:1952 [0] NCCL INFO Channel 03 : 16[1a000] -> 17[1c000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO Channel 03 : 41[1c000] -> 40[1a000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO Channel 03 : 39[8a000] -> 36[1a000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40946 [0] NCCL INFO Channel 03 : 49[1c000] -> 52[1a000] [receive] via NET/IB/3 
+r8i0n3:57933:58064 [1] NCCL INFO Channel 03 : 45[1c000] -> 44[1a000] via P2P/IPC +r7i4n4:1769:1949 [3] NCCL INFO comm 0x149e1815f010 rank 19 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8760:8897 [0] NCCL INFO Channel 03 : 57[1c000] -> 60[1a000] [receive] via NET/IB/3 +r7i4n4:1767:1950 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38733 [2] NCCL INFO comm 0x145220005fc0 rank 14 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46158:46289 [0] NCCL INFO Channel 03 : 20[1a000] -> 21[1c000] via P2P/IPC +r9i1n5:40813:40946 [0] NCCL INFO Channel 03 : 52[1a000] -> 55[8a000] via P2P/IPC +r7i4n4:1767:1950 [1] NCCL INFO comm 0x1498e415e010 rank 17 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57935:58066 [3] NCCL INFO Channel 03 : 47[8a000] -> 44[1a000] via P2P/IPC +r7i4n5:46161:46292 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29284 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n4:1766:1952 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46161:46292 [3] NCCL INFO comm 0x154318005fc0 rank 23 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29154:29284 [2] NCCL INFO comm 0x147c84005fc0 rank 30 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55673:55804 [0] NCCL INFO Channel 03 : 32[1a000] -> 33[1c000] via P2P/IPC +r7i4n5:46159:46291 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8897 [0] NCCL INFO Channel 03 : 60[1a000] -> 63[8a000] via P2P/IPC +r7i7n0:55676:55807 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46159:46291 [1] NCCL INFO comm 0x14ddb0005fc0 rank 21 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46160:46290 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n0:55676:55807 [3] NCCL INFO comm 0x150d0815f010 rank 35 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n5:46158:46289 
[0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57742 [0] NCCL INFO Channel 03 : 40[1a000] -> 41[1c000] via P2P/IPC +r7i4n4:1766:1952 [0] NCCL INFO comm 0x14e38815e010 rank 16 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55674:55805 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i4n5:46160:46290 [2] NCCL INFO comm 0x14a654005fc0 rank 22 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46158:46289 [0] NCCL INFO comm 0x14ea8c005f50 rank 20 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37557 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57741 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n0:55674:55805 [1] NCCL INFO comm 0x14db2815e010 rank 33 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24819:24951 [1] NCCL INFO Channel 03 : 49[1c000] -> 48[1a000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37342:37559 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57611:57741 [3] NCCL INFO comm 0x1502d815f010 rank 43 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n0:55673:55804 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68404:68538 [0] NCCL INFO Channel 03 : 36[1a000] -> 37[1c000] via P2P/IPC +r7i7n2:57609:57739 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40816:40947 [3] NCCL INFO Channel 03 : 55[8a000] -> 52[1a000] via P2P/IPC +r7i7n2:57610:57740 [2] NCCL INFO comm 0x14f1c415e010 rank 42 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37342:37559 [3] NCCL INFO comm 0x15289c005fc0 rank 3 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58313:58443 [1] NCCL INFO Channel 03 : 57[1c000] -> 56[1a000] via P2P/IPC +r7i7n0:55673:55804 [0] NCCL INFO comm 0x148ec015e010 rank 32 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE 
+r7i7n1:68407:68535 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37340:37560 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57609:57739 [1] NCCL INFO comm 0x14c9e815e010 rank 41 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57932:58065 [0] NCCL INFO Channel 03 : 44[1a000] -> 45[1c000] via P2P/IPC +r7i7n1:68407:68535 [3] NCCL INFO comm 0x14d4f4005fc0 rank 39 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57608:57742 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37340:37560 [1] NCCL INFO comm 0x154010005fc0 rank 1 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68405:68536 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57935:58066 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37557 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n2:57608:57742 [0] NCCL INFO comm 0x14fb2c15e010 rank 40 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68405:68536 [1] NCCL INFO comm 0x14aa28005fc0 rank 37 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68406:68537 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57935:58066 [3] NCCL INFO comm 0x151da4005fc0 rank 47 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24820:24953 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r6i4n5:37339:37557 [0] NCCL INFO comm 0x147ac0005fc0 rank 0 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57933:58064 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68404:68538 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r7i7n1:68406:68537 [2] NCCL INFO comm 0x153cbc005fc0 rank 38 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +r9i1n4:24820:24953 [2] NCCL INFO comm 0x151d7815e010 rank 50 nranks 64 
cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57933:58064 [1] NCCL INFO comm 0x15076c005fc0 rank 45 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68404:68538 [0] NCCL INFO comm 0x1451f4005f50 rank 36 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +setting training data start iteration to 0 +r9i1n7:8763:8895 [3] NCCL INFO Channel 03 : 63[8a000] -> 60[1a000] via P2P/IPC +setting validation data start iteration to 0 +r8i0n3:57932:58065 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24952 [0] NCCL INFO Channel 03 : 48[1a000] -> 49[1c000] via P2P/IPC +r8i0n3:57934:58067 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57932:58065 [0] NCCL INFO comm 0x1526b0005f50 rank 44 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24821:24954 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r8i0n3:57934:58067 [2] NCCL INFO comm 0x15469c005fc0 rank 46 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58312:58446 [0] NCCL INFO Channel 03 : 56[1a000] -> 57[1c000] via P2P/IPC +r9i1n4:24821:24954 [3] NCCL INFO comm 0x150ddc15f010 rank 51 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58315:58445 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24819:24951 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40813:40946 [0] NCCL INFO Channel 03 : 52[1a000] -> 53[1c000] via P2P/IPC +r9i1n6:58315:58445 [3] NCCL INFO comm 0x1451c415f010 rank 59 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24819:24951 [1] NCCL INFO comm 0x14f14415e010 rank 49 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40816:40947 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58443 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24952 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58313:58443 [1] NCCL INFO comm 
0x1508a815e010 rank 57 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58314:58444 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n4:24818:24952 [0] NCCL INFO comm 0x150ed015e010 rank 48 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40816:40947 [3] NCCL INFO comm 0x152a04005fc0 rank 55 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40814:40945 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n6:58312:58446 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40815:40948 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40814:40945 [1] NCCL INFO comm 0x145bfc005fc0 rank 53 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58312:58446 [0] NCCL INFO comm 0x14a69415e010 rank 56 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40813:40946 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n5:40815:40948 [2] NCCL INFO comm 0x150d9c005fc0 rank 54 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40813:40946 [0] NCCL INFO comm 0x14fff0005f50 rank 52 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58314:58444 [2] NCCL INFO comm 0x14c83815e010 rank 58 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37341:37558 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8897 [0] NCCL INFO Channel 03 : 60[1a000] -> 61[1c000] via P2P/IPC +r6i4n5:37341:37558 [2] NCCL INFO comm 0x14a168005fc0 rank 2 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8762:8894 [2] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8895 [3] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8763:8895 [3] NCCL INFO comm 0x154ca0005fc0 rank 63 nranks 64 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8761:8896 [1] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8761:8896 [1] NCCL INFO comm 
0x154668005fc0 rank 61 nranks 64 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n7:8762:8894 [2] NCCL INFO comm 0x145be8005fc0 rank 62 nranks 64 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8760:8897 [0] NCCL INFO 4 coll channels, 4 p2p channels, 1 p2p channels per peer +r9i1n7:8760:8897 [0] NCCL INFO comm 0x14db4015f010 rank 60 nranks 64 cudaDev 0 busId 1a000 - Init COMPLETE +done with setups ... +time (ms) | model and optimizer: 2483.04 | train/valid/test data iterators: 766.34 +training ... +r6i4n5:37339:37991 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r6i4n5:37341:37993 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37340:37992 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37342:37994 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37339:37991 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r6i4n5:37339:37991 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r6i4n5:37339:37991 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r6i4n5:37339:37991 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r6i4n5:37339:37991 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r6i4n5:37341:37993 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r6i4n5:37340:37992 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r6i4n5:37342:37994 [3] NCCL INFO Trees [0] 
-1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r6i4n5:37339:37991 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r6i4n5:37339:37991 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r6i4n5:37341:37993 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37340:37992 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37339:37991 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r6i4n5:37342:37994 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37339:37991 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r6i4n5:37339:37991 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r6i4n5:37339:37991 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r6i4n5:37339:37991 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37339:37991 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r6i4n5:37339:37991 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37340:37992 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 00 : 
3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 03 : 
3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 06 : 
1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 09 : 
1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:37993 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:37991 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:37994 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:37992 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37340:37992 [1] NCCL INFO comm 0x154010163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37341:37993 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37341:37993 [2] NCCL INFO comm 0x14a168163a60 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37342:37994 [3] NCCL INFO 12 coll 
channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37342:37994 [3] NCCL INFO comm 0x15289c163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37339:37991 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37339:37991 [0] NCCL INFO comm 0x147a88001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +[2021-05-27 19:49:25,130] [INFO] [checkpointing.py:400:forward] Activation Checkpointing Information +[2021-05-27 19:49:25,130] [INFO] [checkpointing.py:401:forward] ----Partition Activations False, CPU CHECKPOINTING False +[2021-05-27 19:49:25,130] [INFO] [checkpointing.py:404:forward] ----contiguous Memory Checkpointing False with None total layers +[2021-05-27 19:49:25,130] [INFO] [checkpointing.py:407:forward] ----Synchronization False +[2021-05-27 19:49:25,130] [INFO] [checkpointing.py:408:forward] ----Profiling time in checkpointing False +r7i1n3:4941:5083 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i1n3:4944:5086 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4943:5082 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4942:5085 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4941:5083 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i1n3:4941:5083 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i1n3:4941:5083 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i1n3:4941:5083 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i1n3:4941:5083 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i1n3:4944:5086 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i1n3:4943:5082 [2] NCCL INFO 
Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i1n3:4941:5083 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i1n3:4942:5085 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i1n3:4941:5083 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i1n3:4944:5086 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4943:5082 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4941:5083 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i1n3:4941:5083 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i1n3:4942:5085 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i1n3:4941:5083 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i1n3:4941:5083 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i1n3:4941:5083 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4941:5083 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i1n3:4941:5083 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5085 [1] NCCL 
INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via 
P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 
06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC 
+r7i1n3:4942:5085 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5082 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5083 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5086 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5085 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4942:5085 [1] NCCL INFO comm 0x14e62c163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE 
+r7i1n3:4943:5082 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4943:5082 [2] NCCL INFO comm 0x153094163f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i1n3:4944:5086 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4941:5083 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4944:5086 [3] NCCL INFO comm 0x145160163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i1n3:4941:5083 [0] NCCL INFO comm 0x151470005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i1n3:4941:4941 [0] NCCL INFO Launch mode Parallel +r7i1n3:4941:5092 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i1n3:4942:5095 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4943:5093 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4944:5094 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4941:5092 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i1n3:4941:5092 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i1n3:4941:5092 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i1n3:4944:5094 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i1n3:4941:5092 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i1n3:4943:5093 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] 
-1/-1/-1->2->0|0->2->-1/-1/-1 +r7i1n3:4942:5095 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i1n3:4941:5092 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i1n3:4944:5094 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i1n3:4941:5092 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i1n3:4943:5093 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4942:5095 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i1n3:4941:5092 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i1n3:4941:5092 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i1n3:4941:5092 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i1n3:4941:5092 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i1n3:4941:5092 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i1n3:4941:5092 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i1n3:4941:5092 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i1n3:4941:5092 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4942:5095 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO 
Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC 
+r7i1n3:4944:5094 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 06 : 
1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC 
+r7i1n3:4944:5094 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i1n3:4943:5093 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i1n3:4944:5094 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i1n3:4941:5092 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i1n3:4942:5095 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4942:5095 [1] NCCL INFO comm 0x14e62c51ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i1n3:4943:5093 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4943:5093 [2] NCCL INFO comm 0x153094523210 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i1n3:4944:5094 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4941:5092 [0] NCCL INFO 
12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i1n3:4944:5094 [3] NCCL INFO comm 0x145160519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i1n3:4941:5092 [0] NCCL INFO comm 0x151474162b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i1n3:4941:4941 [0] NCCL INFO Launch mode Parallel +r7i2n6:892:1032 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i2n6:895:1034 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:893:1033 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:892:1032 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i2n6:894:1035 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:892:1032 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i2n6:892:1032 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i2n6:892:1032 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i2n6:892:1032 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i2n6:895:1034 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i2n6:893:1033 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i2n6:892:1032 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i2n6:894:1035 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 
3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i2n6:892:1032 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i2n6:893:1033 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:895:1034 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:892:1032 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i2n6:892:1032 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i2n6:894:1035 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:892:1032 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i2n6:892:1032 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i2n6:892:1032 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:892:1032 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i2n6:892:1032 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:893:1033 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 01 : 1[1c000] -> 
3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 04 : 0[1a000] 
-> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 07 : 
1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 10 
: 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1035 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1032 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1034 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1033 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:893:1033 [1] NCCL INFO comm 0x1532f8402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:894:1035 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:894:1035 [2] NCCL INFO comm 0x152974402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:892:1032 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:895:1034 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:892:1032 [0] NCCL INFO comm 0x151b3c001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i2n6:892:892 [0] NCCL INFO Launch mode Parallel +r7i2n6:895:1034 [3] NCCL INFO comm 0x149ae4402e20 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:892:1041 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i2n6:895:1042 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r7i2n6:894:1044 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:893:1043 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:892:1041 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i2n6:892:1041 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i2n6:892:1041 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i2n6:892:1041 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i2n6:892:1041 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i2n6:894:1044 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i2n6:895:1042 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i2n6:893:1043 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i2n6:892:1041 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i2n6:892:1041 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i2n6:895:1042 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:894:1044 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 
+r7i2n6:893:1043 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:892:1041 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i2n6:892:1041 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i2n6:892:1041 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i2n6:892:1041 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i2n6:892:1041 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i2n6:892:1041 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i2n6:892:1041 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:894:1044 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 02 : 1[1c000] -> 
0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 04 : 2[88000] 
-> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 08 : 
2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 11 
: 1[1c000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i2n6:894:1044 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i2n6:895:1042 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i2n6:892:1041 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:895:1042 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i2n6:893:1043 [1] NCCL INFO comm 0x1532f87ba4d0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:894:1044 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:894:1044 [2] NCCL INFO comm 0x1529747d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:892:1041 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:895:1042 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i2n6:895:1042 [3] NCCL INFO comm 0x149ae47b7b90 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i2n6:892:1041 [0] NCCL INFO comm 0x151b34402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i2n6:892:892 [0] NCCL INFO Launch mode Parallel +r7i3n0:38598:38740 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i3n0:38601:38742 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38600:38741 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38599:38743 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38598:38740 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i3n0:38598:38740 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i3n0:38598:38740 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i3n0:38598:38740 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i3n0:38601:38742 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 
[1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i3n0:38600:38741 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i3n0:38599:38743 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i3n0:38598:38740 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i3n0:38598:38740 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i3n0:38601:38742 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38599:38743 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38600:38741 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38598:38740 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i3n0:38598:38740 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i3n0:38598:38740 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i3n0:38598:38740 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i3n0:38598:38740 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i3n0:38598:38740 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38598:38740 [0] NCCL INFO Trees [0] 
1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i3n0:38598:38740 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38599:38743 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC 
+r7i3n0:38600:38741 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC 
+r7i3n0:38600:38741 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC 
+r7i3n0:38600:38741 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC 
+r7i3n0:38600:38741 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38741 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38742 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38740 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38743 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38599:38743 [1] NCCL INFO comm 0x14da18163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i3n0:38600:38741 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38600:38741 [2] NCCL INFO comm 0x145220162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38601:38742 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38598:38740 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38601:38742 [3] NCCL INFO comm 0x148620163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38598:38740 [0] NCCL INFO comm 0x14eaec005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38598:38598 [0] NCCL INFO Launch mode Parallel +r7i3n0:38598:38749 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i3n0:38599:38751 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38601:38750 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38598:38749 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i3n0:38600:38752 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38598:38749 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i3n0:38598:38749 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i3n0:38598:38749 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i3n0:38598:38749 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i3n0:38599:38751 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 
3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i3n0:38601:38750 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i3n0:38600:38752 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i3n0:38598:38749 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i3n0:38598:38749 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i3n0:38599:38751 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i3n0:38601:38750 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38600:38752 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38598:38749 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i3n0:38598:38749 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i3n0:38598:38749 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i3n0:38598:38749 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i3n0:38598:38749 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i3n0:38598:38749 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 
3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i3n0:38598:38749 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38599:38751 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC 
+r7i3n0:38600:38752 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC 
+r7i3n0:38600:38752 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC 
+r7i3n0:38600:38752 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC 
+r7i3n0:38601:38750 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i3n0:38600:38752 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i3n0:38601:38750 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i3n0:38598:38749 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i3n0:38599:38751 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38599:38751 [1] NCCL INFO comm 0x14da1851ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i3n0:38600:38752 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38600:38752 [2] NCCL INFO comm 0x145220535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i3n0:38601:38750 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38598:38749 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i3n0:38601:38750 [3] NCCL INFO comm 0x148620519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38598:38749 [0] NCCL INFO comm 0x14eacc163b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38598:38598 [0] NCCL INFO Launch mode Parallel +r7i4n4:1766:1959 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i4n4:1768:1958 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1769:1960 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1767:1961 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1766:1959 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i4n4:1766:1959 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i4n4:1766:1959 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i4n4:1766:1959 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i4n4:1769:1960 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 
[6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i4n4:1768:1958 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i4n4:1766:1959 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i4n4:1767:1961 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i4n4:1766:1959 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i4n4:1769:1960 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1768:1958 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1959 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i4n4:1766:1959 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i4n4:1767:1961 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1766:1959 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i4n4:1766:1959 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i4n4:1766:1959 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i4n4:1766:1959 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1766:1959 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 
1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i4n4:1766:1959 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n4:1767:1961 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 
[0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] 
via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO 
Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1958 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1960 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1959 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC 
+r7i4n4:1769:1960 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1961 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1767:1961 [1] NCCL INFO comm 0x1498e4402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1768:1958 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1768:1958 [2] NCCL INFO comm 0x153830402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1766:1959 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1769:1960 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1766:1959 [0] NCCL INFO comm 0x14e390001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1766:1766 [0] NCCL INFO Launch mode Parallel +r7i4n4:1769:1960 [3] NCCL INFO comm 0x149e18402f30 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1766:1967 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i4n4:1767:1969 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1769:1970 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1768:1968 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1766:1967 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i4n4:1766:1967 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i4n4:1766:1967 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i4n4:1766:1967 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i4n4:1767:1969 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i4n4:1766:1967 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i4n4:1769:1970 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 
0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i4n4:1768:1968 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i4n4:1766:1967 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i4n4:1767:1969 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n4:1769:1970 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1766:1967 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i4n4:1768:1968 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1766:1967 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i4n4:1766:1967 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i4n4:1766:1967 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i4n4:1766:1967 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i4n4:1766:1967 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n4:1766:1967 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i4n4:1766:1967 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n4:1767:1969 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via 
P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 
03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC 
+r7i4n4:1766:1967 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 09 : 
1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n4:1768:1968 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n4:1769:1970 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n4:1766:1967 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n4:1767:1969 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1767:1969 [1] NCCL INFO comm 0x1498e47ba4d0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1768:1968 [2] NCCL INFO 12 coll channels, 16 
p2p channels, 4 p2p channels per peer +r7i4n4:1768:1968 [2] NCCL INFO comm 0x1538307d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n4:1769:1970 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1766:1967 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n4:1769:1970 [3] NCCL INFO comm 0x149e187b7c50 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n4:1766:1967 [0] NCCL INFO comm 0x14e388402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1766:1766 [0] NCCL INFO Launch mode Parallel +r7i4n5:46158:46298 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i4n5:46160:46300 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46159:46299 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46161:46301 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46158:46298 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i4n5:46158:46298 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i4n5:46158:46298 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i4n5:46160:46300 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i4n5:46159:46299 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i4n5:46158:46298 [0] NCCL INFO Channel 04/12 : 0 3 1 2 
+r7i4n5:46161:46301 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i4n5:46158:46298 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i4n5:46160:46300 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46159:46299 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46161:46301 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46158:46298 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i4n5:46158:46298 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i4n5:46158:46298 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i4n5:46158:46298 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i4n5:46158:46298 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i4n5:46158:46298 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i4n5:46158:46298 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46158:46298 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i4n5:46158:46298 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46159:46299 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] 
NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46159:46299 [1] 
NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] 
NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] 
NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46300 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46301 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46298 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46299 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46159:46299 [1] NCCL INFO comm 0x14ddb0163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46160:46300 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46160:46300 [2] NCCL INFO comm 
0x14a654162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46161:46301 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46158:46298 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46161:46301 [3] NCCL INFO comm 0x154318163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n5:46158:46298 [0] NCCL INFO comm 0x14eaac005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46158:46158 [0] NCCL INFO Launch mode Parallel +r7i4n5:46158:46310 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i4n5:46159:46307 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46160:46308 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46161:46309 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46158:46310 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i4n5:46158:46310 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i4n5:46158:46310 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i4n5:46158:46310 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i4n5:46158:46310 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i4n5:46159:46307 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i4n5:46160:46308 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 
+r7i4n5:46161:46309 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i4n5:46158:46310 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i4n5:46158:46310 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i4n5:46159:46307 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46161:46309 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46160:46308 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46158:46310 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i4n5:46158:46310 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i4n5:46158:46310 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i4n5:46158:46310 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i4n5:46158:46310 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i4n5:46158:46310 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i4n5:46158:46310 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46159:46307 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC 
+r7i4n5:46159:46307 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC 
+r7i4n5:46161:46309 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC 
+r7i4n5:46161:46309 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC 
+r7i4n5:46159:46307 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i4n5:46160:46308 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i4n5:46161:46309 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i4n5:46158:46310 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i4n5:46159:46307 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46159:46307 [1] NCCL INFO comm 0x14ddb051ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46160:46308 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46160:46308 [2] NCCL INFO comm 0x14a654535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init 
COMPLETE +r7i4n5:46161:46309 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46158:46310 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i4n5:46161:46309 [3] NCCL INFO comm 0x154318519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i4n5:46158:46310 [0] NCCL INFO comm 0x14ea8c163b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46158:46158 [0] NCCL INFO Launch mode Parallel +r7i5n3:79994:80134 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i5n3:79994:80134 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i5n3:79995:80136 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79996:80137 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79997:80135 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79994:80134 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i5n3:79994:80134 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i5n3:79994:80134 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i5n3:79994:80134 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i5n3:79994:80134 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i5n3:79994:80134 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i5n3:79995:80136 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i5n3:79996:80137 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 
3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i5n3:79997:80135 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i5n3:79994:80134 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i5n3:79994:80134 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i5n3:79995:80136 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79996:80137 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79994:80134 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i5n3:79997:80135 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79994:80134 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i5n3:79994:80134 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79994:80134 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i5n3:79994:80134 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79995:80136 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 00 : 3[8a000] 
-> 2[88000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 03 : 3[8a000] 
-> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 06 : 1[1c000] 
-> 0[1a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 09 : 1[1c000] 
-> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80137 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80134 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80135 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80136 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79996:80137 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79995:80136 [1] NCCL INFO comm 0x15059c402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i5n3:79996:80137 [2] NCCL INFO comm 0x14f6ec402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i5n3:79994:80134 [0] NCCL INFO 12 coll channels, 
16 p2p channels, 4 p2p channels per peer +r7i5n3:79997:80135 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79994:80134 [0] NCCL INFO comm 0x14ceb8001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i5n3:79994:79994 [0] NCCL INFO Launch mode Parallel +r7i5n3:79997:80135 [3] NCCL INFO comm 0x1512cc402ef0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79994:80143 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i5n3:79997:80145 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79995:80144 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79996:80146 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79994:80143 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i5n3:79994:80143 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i5n3:79994:80143 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i5n3:79994:80143 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i5n3:79994:80143 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i5n3:79997:80145 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i5n3:79995:80144 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i5n3:79996:80146 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 
[3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i5n3:79994:80143 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i5n3:79994:80143 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i5n3:79997:80145 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79995:80144 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i5n3:79996:80146 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79994:80143 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i5n3:79994:80143 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i5n3:79994:80143 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i5n3:79994:80143 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i5n3:79994:80143 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i5n3:79994:80143 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i5n3:79994:80143 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79995:80144 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 00 : 2[88000] -> 
1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 03 : 1[1c000] -> 
0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 06 : 1[1c000] -> 
0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 09 : 3[8a000] -> 
2[88000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i5n3:79996:80146 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i5n3:79994:80143 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i5n3:79997:80145 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i5n3:79995:80144 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79995:80144 [1] NCCL INFO comm 0x15059c7ba4d0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i5n3:79996:80146 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79996:80146 [2] NCCL INFO comm 0x14f6ec7d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i5n3:79994:80143 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79997:80145 [3] NCCL INFO 12 coll 
channels, 16 p2p channels, 4 p2p channels per peer +r7i5n3:79994:80143 [0] NCCL INFO comm 0x14ceb0402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i5n3:79994:79994 [0] NCCL INFO Launch mode Parallel +r7i5n3:79997:80145 [3] NCCL INFO comm 0x1512cc7b7930 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29292 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i6n8:29154:29293 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29155:29294 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29153:29295 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29152:29292 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i6n8:29152:29292 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i6n8:29152:29292 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i6n8:29152:29292 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i6n8:29154:29293 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i6n8:29152:29292 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i6n8:29153:29295 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i6n8:29155:29294 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 
0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i6n8:29152:29292 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i6n8:29152:29292 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i6n8:29154:29293 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29153:29295 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29155:29294 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29152:29292 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i6n8:29152:29292 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i6n8:29152:29292 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i6n8:29152:29292 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i6n8:29152:29292 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29152:29292 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i6n8:29152:29292 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29153:29295 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 01 : 
3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 04 : 
3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 07 : 
3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 10 : 
0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29294 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29293 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29292 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29155:29294 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29295 [1] NCCL INFO comm 0x154104163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29293 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29154:29293 [2] NCCL INFO comm 0x147c84162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29152:29292 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29155:29294 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer 
+r7i6n8:29152:29292 [0] NCCL INFO comm 0x1500e8005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29152:29152 [0] NCCL INFO Launch mode Parallel +r7i6n8:29155:29294 [3] NCCL INFO comm 0x14bc2c163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29301 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i6n8:29154:29302 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29155:29303 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29153:29304 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29152:29301 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i6n8:29152:29301 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i6n8:29152:29301 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i6n8:29152:29301 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i6n8:29154:29302 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i6n8:29155:29303 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i6n8:29153:29304 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] 
-1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i6n8:29152:29301 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i6n8:29152:29301 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i6n8:29154:29302 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29153:29304 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29155:29303 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29152:29301 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i6n8:29152:29301 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i6n8:29152:29301 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i6n8:29152:29301 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i6n8:29152:29301 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i6n8:29152:29301 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i6n8:29152:29301 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i6n8:29152:29301 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29155:29303 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC 
+r7i6n8:29154:29302 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC 
+r7i6n8:29155:29303 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC 
+r7i6n8:29152:29301 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC 
+r7i6n8:29152:29301 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i6n8:29154:29302 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i6n8:29155:29303 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i6n8:29152:29301 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i6n8:29153:29304 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29153:29304 [1] NCCL INFO comm 0x15410451ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29302 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29154:29302 [2] NCCL INFO comm 0x147c84535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i6n8:29155:29303 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29152:29301 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i6n8:29155:29303 [3] NCCL INFO comm 
0x14bc2c519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29152:29301 [0] NCCL INFO comm 0x1500c8163aa0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i6n8:29152:29152 [0] NCCL INFO Launch mode Parallel +r7i7n0:55673:55813 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i7n0:55674:55815 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55676:55814 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55675:55816 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55673:55813 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n0:55673:55813 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n0:55673:55813 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n0:55673:55813 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n0:55674:55815 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n0:55676:55814 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n0:55673:55813 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i7n0:55675:55816 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 
0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n0:55673:55813 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n0:55674:55815 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55676:55814 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55673:55813 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n0:55675:55816 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55673:55813 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n0:55673:55813 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n0:55673:55813 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n0:55673:55813 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n0:55673:55813 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55673:55813 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n0:55673:55813 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55674:55815 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via 
P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via 
P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via 
P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via 
P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55816 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55813 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55814 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55815 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55674:55815 [1] NCCL INFO comm 0x14db28402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n0:55675:55816 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55675:55816 [2] NCCL INFO comm 0x148d64402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55673:55813 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55676:55814 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55673:55813 [0] NCCL INFO comm 0x148ec8001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE 
+r7i7n0:55673:55673 [0] NCCL INFO Launch mode Parallel +r7i7n0:55676:55814 [3] NCCL INFO comm 0x150d08402f30 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n0:55673:55822 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i7n0:55674:55824 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55675:55823 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55676:55825 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55673:55822 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n0:55673:55822 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n0:55673:55822 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n0:55673:55822 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n0:55673:55822 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i7n0:55674:55824 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n0:55673:55822 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n0:55675:55823 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n0:55676:55825 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 
1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n0:55673:55822 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n0:55674:55824 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55673:55822 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n0:55675:55823 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55676:55825 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55673:55822 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n0:55673:55822 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n0:55673:55822 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n0:55673:55822 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n0:55673:55822 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n0:55673:55822 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55673:55822 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 01 : 
2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 04 : 
0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 07 : 
1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 10 : 
2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n0:55675:55823 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n0:55676:55825 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n0:55673:55822 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n0:55674:55824 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55674:55824 [1] NCCL INFO comm 0x14db287ba4f0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n0:55675:55823 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55675:55823 [2] NCCL INFO comm 0x148d647d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55676:55825 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55673:55822 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n0:55676:55825 [3] NCCL INFO comm 0x150d087b7c50 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n0:55673:55822 [0] NCCL INFO comm 0x148ec0402b20 rank 0 nranks 4 
cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n0:55673:55673 [0] NCCL INFO Launch mode Parallel +r7i7n1:68404:68545 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i7n1:68406:68548 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68405:68547 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68407:68546 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68404:68545 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n1:68404:68545 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n1:68404:68545 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n1:68404:68545 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n1:68407:68546 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n1:68405:68547 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n1:68406:68548 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n1:68404:68545 [0] NCCL INFO Channel 05/12 : 0 3 2 1 
+r7i7n1:68404:68545 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n1:68405:68547 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68407:68546 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68404:68545 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n1:68406:68548 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n1:68404:68545 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n1:68404:68545 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n1:68404:68545 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n1:68404:68545 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n1:68404:68545 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68404:68545 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n1:68404:68545 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68405:68547 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 
01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 
04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 
07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 
10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68548 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68545 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68546 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68547 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68405:68547 [1] NCCL INFO comm 0x14aa28163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68406:68548 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68406:68548 [2] NCCL INFO comm 0x153cbc162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68407:68546 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68404:68545 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68407:68546 [3] NCCL INFO comm 0x14d4f4163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68404:68545 [0] NCCL INFO comm 0x145214005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68404:68404 [0] NCCL INFO 
Launch mode Parallel +r7i7n1:68404:68554 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i7n1:68406:68557 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68405:68556 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68407:68555 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68404:68554 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n1:68404:68554 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n1:68404:68554 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n1:68404:68554 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n1:68404:68554 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i7n1:68406:68557 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n1:68405:68556 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n1:68407:68555 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n1:68404:68554 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n1:68404:68554 [0] 
NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n1:68406:68557 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n1:68405:68556 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n1:68404:68554 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n1:68407:68555 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n1:68404:68554 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n1:68404:68554 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n1:68404:68554 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n1:68404:68554 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n1:68404:68554 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n1:68404:68554 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n1:68405:68556 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 01 
: 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 04 
: 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 07 
: 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 10 
: 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n1:68406:68557 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n1:68407:68555 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n1:68404:68554 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n1:68405:68556 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68405:68556 [1] NCCL INFO comm 0x14aa2851ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68406:68557 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68407:68555 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68404:68554 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n1:68407:68555 [3] NCCL INFO comm 0x14d4f4519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68406:68557 [2] NCCL INFO comm 0x153cbc535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68404:68554 [0] NCCL INFO comm 0x1451f4163b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68404:68404 [0] NCCL INFO Launch mode Parallel +r7i7n2:57608:57750 [0] NCCL INFO Channel 00/12 : 0 1 2 3 
+r7i7n2:57610:57749 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57609:57751 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57611:57748 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57608:57750 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n2:57608:57750 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n2:57608:57750 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n2:57608:57750 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n2:57610:57749 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n2:57609:57751 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n2:57608:57750 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i7n2:57611:57748 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n2:57608:57750 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n2:57610:57749 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57609:57751 [1] NCCL 
INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57611:57748 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57608:57750 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n2:57608:57750 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n2:57608:57750 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n2:57608:57750 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n2:57608:57750 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n2:57608:57750 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57608:57750 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n2:57608:57750 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57609:57751 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 01 
: 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 04 
: 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 07 
: 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 10 
: 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57749 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57748 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57750 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57751 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57609:57751 [1] NCCL INFO comm 0x14c9e8402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57610:57749 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57610:57749 [2] NCCL INFO comm 0x14f1c4402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n2:57611:57748 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57608:57750 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57611:57748 [3] NCCL INFO comm 0x1502d8402e20 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n2:57608:57750 [0] NCCL INFO comm 0x14fb34001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57608:57608 [0] NCCL INFO Launch mode Parallel +r7i7n2:57608:57791 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r7i7n2:57610:57792 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r7i7n2:57609:57793 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57611:57794 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57608:57791 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r7i7n2:57608:57791 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r7i7n2:57608:57791 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r7i7n2:57608:57791 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r7i7n2:57608:57791 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r7i7n2:57610:57792 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r7i7n2:57609:57793 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r7i7n2:57611:57794 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r7i7n2:57608:57791 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r7i7n2:57608:57791 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r7i7n2:57610:57792 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57609:57793 [1] NCCL INFO Setting 
affinity for GPU 1 to 0fffff +r7i7n2:57611:57794 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57608:57791 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r7i7n2:57608:57791 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r7i7n2:57608:57791 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r7i7n2:57608:57791 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r7i7n2:57608:57791 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r7i7n2:57608:57791 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r7i7n2:57608:57791 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57609:57793 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO 
Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO 
Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO 
Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO 
Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r7i7n2:57610:57792 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r7i7n2:57608:57791 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r7i7n2:57611:57794 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r7i7n2:57609:57793 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57609:57793 [1] NCCL INFO comm 0x14c9e87ba4f0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57610:57792 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57610:57792 [2] NCCL INFO comm 0x14f1c47d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n2:57608:57791 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57611:57794 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r7i7n2:57608:57791 [0] NCCL INFO comm 0x14fb2c402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n2:57608:57608 [0] NCCL INFO Launch mode Parallel +r7i7n2:57611:57794 [3] NCCL INFO comm 0x1502d87b7b90 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57932:58073 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r8i0n3:57933:58076 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57934:58074 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r8i0n3:57935:58075 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57932:58073 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r8i0n3:57932:58073 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r8i0n3:57932:58073 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r8i0n3:57932:58073 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r8i0n3:57933:58076 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r8i0n3:57932:58073 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r8i0n3:57935:58075 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r8i0n3:57932:58073 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r8i0n3:57934:58074 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r8i0n3:57933:58076 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57932:58073 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r8i0n3:57935:58075 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57932:58073 [0] NCCL INFO Channel 08/12 : 0 2 3 
1 +r8i0n3:57932:58073 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r8i0n3:57934:58074 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57932:58073 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r8i0n3:57932:58073 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r8i0n3:57932:58073 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57932:58073 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r8i0n3:57932:58073 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57933:58076 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 
02 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 
04 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 
07 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 
10 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58074 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58075 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58073 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58076 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57933:58076 [1] NCCL INFO comm 0x15076c163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57934:58074 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57934:58074 [2] NCCL INFO comm 0x15469c162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57935:58075 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57932:58073 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57935:58075 [3] NCCL INFO comm 0x151da4163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57932:58073 [0] NCCL INFO comm 0x1526d0005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57932:57932 [0] NCCL INFO Launch mode Parallel +r8i0n3:57932:58082 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r8i0n3:57933:58084 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57934:58085 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57935:58083 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r8i0n3:57932:58082 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r8i0n3:57932:58082 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r8i0n3:57932:58082 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r8i0n3:57932:58082 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r8i0n3:57932:58082 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r8i0n3:57933:58084 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r8i0n3:57934:58085 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r8i0n3:57935:58083 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r8i0n3:57932:58082 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r8i0n3:57932:58082 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r8i0n3:57933:58084 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57934:58085 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r8i0n3:57935:58083 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57932:58082 [0] NCCL INFO Channel 08/12 : 0 2 3 
1 +r8i0n3:57932:58082 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r8i0n3:57932:58082 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r8i0n3:57932:58082 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r8i0n3:57932:58082 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r8i0n3:57932:58082 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r8i0n3:57932:58082 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57933:58084 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO 
Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO 
Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO 
Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO 
Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r8i0n3:57934:58085 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r8i0n3:57935:58083 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r8i0n3:57932:58082 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r8i0n3:57933:58084 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57933:58084 [1] NCCL INFO comm 0x15076c51ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r8i0n3:57934:58085 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57934:58085 [2] NCCL INFO comm 0x15469c535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r8i0n3:57935:58083 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57932:58082 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r8i0n3:57935:58083 [3] NCCL INFO comm 0x151da4519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57932:58082 [0] NCCL INFO comm 0x1526b0163b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57932:57932 [0] NCCL INFO Launch mode Parallel +r9i1n4:24818:24960 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n4:24820:24962 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24821:24963 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24819:24961 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24818:24960 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n4:24818:24960 [0] 
NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n4:24818:24960 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n4:24818:24960 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n4:24820:24962 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n4:24821:24963 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n4:24819:24961 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n4:24818:24960 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n4:24818:24960 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n4:24820:24962 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24818:24960 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n4:24821:24963 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n4:24819:24961 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24818:24960 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n4:24818:24960 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n4:24818:24960 
[0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n4:24818:24960 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n4:24818:24960 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24818:24960 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n4:24818:24960 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24821:24963 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO 
Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO 
Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO 
Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO 
Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24962 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24960 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24963 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24961 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24819:24961 [1] NCCL INFO comm 0x14f144402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24820:24962 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24820:24962 [2] NCCL INFO comm 0x151d78402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24818:24960 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24821:24963 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24818:24960 [0] NCCL INFO comm 0x150ed8001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24821:24963 [3] NCCL INFO comm 0x150ddc402f30 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24818:24818 [0] NCCL INFO Launch mode Parallel +r9i1n4:24818:24969 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n4:24820:24971 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24819:24970 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24821:24972 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24818:24969 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n4:24818:24969 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n4:24818:24969 [0] NCCL INFO Channel 03/12 : 
0 2 1 3 +r9i1n4:24820:24971 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n4:24818:24969 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n4:24819:24970 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n4:24821:24972 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n4:24818:24969 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n4:24820:24971 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n4:24818:24969 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n4:24819:24970 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24821:24972 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n4:24818:24969 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n4:24818:24969 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n4:24818:24969 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n4:24818:24969 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n4:24818:24969 [0] NCCL INFO Channel 11/12 
: 0 3 2 1 +r9i1n4:24818:24969 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n4:24818:24969 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n4:24818:24969 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n4:24819:24970 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 
02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 
05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 
08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 
11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n4:24820:24971 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n4:24821:24972 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n4:24818:24969 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24821:24972 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n4:24819:24970 [1] NCCL INFO comm 0x14f1447ba4d0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24820:24971 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24820:24971 [2] NCCL INFO comm 0x151d787d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24818:24969 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24821:24972 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n4:24818:24969 [0] NCCL INFO comm 0x150ed0402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n4:24821:24972 [3] NCCL INFO comm 0x150ddc7b7c50 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24818:24818 [0] NCCL INFO Launch mode Parallel +r9i1n5:40813:40956 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n5:40815:40959 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40816:40957 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40814:40958 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40813:40956 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n5:40813:40956 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n5:40813:40956 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n5:40813:40956 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n5:40815:40959 
[2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n5:40816:40957 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n5:40813:40956 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n5:40814:40958 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n5:40813:40956 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n5:40815:40959 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40816:40957 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40814:40958 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n5:40813:40956 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n5:40813:40956 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n5:40813:40956 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n5:40813:40956 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n5:40813:40956 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n5:40813:40956 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 
+r9i1n5:40813:40956 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n5:40813:40956 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40814:40958 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 02 : 
1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 05 : 
2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 08 : 
3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 11 : 
3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40959 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40957 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40956 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40958 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40814:40958 [1] NCCL INFO comm 0x145bfc163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40815:40959 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40815:40959 [2] NCCL INFO comm 0x150d9c162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40816:40957 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40813:40956 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40816:40957 [3] NCCL INFO comm 0x152a04163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40813:40956 [0] NCCL INFO comm 0x150010005fc0 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40813:40813 [0] NCCL INFO Launch mode Parallel +r9i1n5:40813:40965 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n5:40816:40968 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40814:40967 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40815:40966 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40813:40965 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n5:40813:40965 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n5:40813:40965 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n5:40813:40965 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n5:40814:40967 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 
[2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n5:40815:40966 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n5:40816:40968 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n5:40813:40965 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n5:40813:40965 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n5:40814:40967 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n5:40815:40966 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40816:40968 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40813:40965 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n5:40813:40965 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n5:40813:40965 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n5:40813:40965 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n5:40813:40965 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n5:40813:40965 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n5:40813:40965 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 
2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n5:40813:40965 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40814:40967 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 02 : 
1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 05 : 
2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 08 : 
1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 11 : 
2[88000] -> 1[1c000] via P2P/IPC +r9i1n5:40815:40966 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n5:40816:40968 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n5:40813:40965 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40816:40968 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n5:40814:40967 [1] NCCL INFO comm 0x145bfc51ba00 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40815:40966 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40815:40966 [2] NCCL INFO comm 0x150d9c535010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40813:40965 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40816:40968 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n5:40813:40965 [0] NCCL INFO comm 0x14fff0163b10 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40813:40813 [0] NCCL INFO Launch mode Parallel +r9i1n5:40816:40968 [3] NCCL INFO comm 0x152a04519540 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58312:58462 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n6:58315:58464 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58314:58463 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58313:58465 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58312:58462 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n6:58312:58462 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n6:58312:58462 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n6:58312:58462 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n6:58315:58464 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] 
-1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n6:58314:58463 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n6:58312:58462 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n6:58313:58465 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n6:58312:58462 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n6:58315:58464 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58314:58463 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58313:58465 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58312:58462 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n6:58312:58462 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n6:58312:58462 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n6:58312:58462 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n6:58312:58462 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n6:58312:58462 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58312:58462 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] 
-1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n6:58312:58462 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58315:58464 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL 
INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL 
INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL 
INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL 
INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58463 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58464 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58462 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58465 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58313:58465 [1] NCCL INFO comm 0x1508a8402f60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58314:58463 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58314:58463 [2] NCCL INFO comm 0x14c838402080 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58315:58464 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58312:58462 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58315:58464 [3] NCCL INFO comm 0x1451c4402f30 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58312:58462 [0] NCCL INFO comm 0x14a69c001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58312:58312 [0] NCCL INFO Launch mode Parallel +r9i1n6:58312:58471 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n6:58313:58472 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58312:58471 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n6:58315:58473 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58314:58474 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58312:58471 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n6:58312:58471 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n6:58312:58471 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n6:58312:58471 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n6:58312:58471 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n6:58313:58472 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] 
-1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n6:58314:58474 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n6:58312:58471 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n6:58312:58471 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n6:58313:58472 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n6:58315:58473 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n6:58312:58471 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n6:58314:58474 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58312:58471 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n6:58312:58471 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n6:58315:58473 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n6:58312:58471 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n6:58312:58471 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 
[5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n6:58312:58471 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58314:58474 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC 
+r9i1n6:58315:58473 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC 
+r9i1n6:58315:58473 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC 
+r9i1n6:58315:58473 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n6:58314:58474 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC 
+r9i1n6:58314:58474 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n6:58315:58473 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n6:58312:58471 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n6:58313:58472 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58313:58472 [1] NCCL INFO comm 0x1508a87ba4d0 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58314:58474 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58314:58474 [2] NCCL INFO comm 0x14c8387d3010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n6:58315:58473 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58312:58471 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n6:58315:58473 [3] NCCL INFO comm 0x1451c47b7c50 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n6:58312:58471 [0] NCCL INFO comm 0x14a694402b20 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n6:58312:58312 [0] NCCL INFO Launch mode Parallel +r9i1n7:8760:9185 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n7:8762:9186 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8763:9188 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8761:9187 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8760:9185 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n7:8760:9185 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n7:8760:9185 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n7:8760:9185 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n7:8762:9186 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 
0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n7:8760:9185 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n7:8763:9188 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n7:8761:9187 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n7:8760:9185 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n7:8760:9185 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n7:8762:9186 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8763:9188 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8761:9187 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8760:9185 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n7:8760:9185 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n7:8760:9185 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n7:8760:9185 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n7:8760:9185 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8760:9185 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 
3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n7:8760:9185 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8762:9186 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC 
+r9i1n7:8761:9187 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 05 : 
0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC 
+r9i1n7:8762:9186 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9185 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9186 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9188 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO 12 coll channels, 
16 p2p channels, 4 p2p channels per peer +r9i1n7:8760:9185 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9187 [1] NCCL INFO comm 0x154668163f50 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n7:8762:9186 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8762:9186 [2] NCCL INFO comm 0x145be8162f50 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8763:9188 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8760:9185 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8763:9188 [3] NCCL INFO comm 0x154ca0163ee0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8760:9185 [0] NCCL INFO comm 0x14daf4001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8760:8760 [0] NCCL INFO Launch mode Parallel +r9i1n7:8760:9194 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r9i1n7:8761:9195 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8763:9196 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8762:9197 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8760:9194 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r9i1n7:8760:9194 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r9i1n7:8760:9194 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r9i1n7:8760:9194 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r9i1n7:8761:9195 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r9i1n7:8760:9194 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r9i1n7:8763:9196 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 
1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r9i1n7:8762:9197 [2] NCCL INFO Trees [0] 3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r9i1n7:8760:9194 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r9i1n7:8761:9195 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8763:9196 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n7:8760:9194 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r9i1n7:8762:9197 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8760:9194 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r9i1n7:8760:9194 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r9i1n7:8760:9194 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r9i1n7:8760:9194 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r9i1n7:8760:9194 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r9i1n7:8760:9194 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r9i1n7:8760:9194 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8761:9195 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9194 [0] 
NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via 
P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 
06 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC 
+r9i1n7:8760:9194 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r9i1n7:8760:9194 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r9i1n7:8762:9197 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r9i1n7:8763:9196 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8760:9194 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r9i1n7:8761:9195 [1] NCCL INFO comm 0x15466851cf60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n7:8762:9197 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per 
peer +r9i1n7:8763:9196 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8762:9197 [2] NCCL INFO comm 0x145be8536010 rank 2 nranks 4 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8760:9194 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r9i1n7:8763:9196 [3] NCCL INFO comm 0x154ca0519dc0 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8760:9194 [0] NCCL INFO comm 0x14db00001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n7:8760:8760 [0] NCCL INFO Launch mode Parallel +r6i4n5:37339:38000 [0] NCCL INFO Channel 00/12 : 0 1 2 3 +r6i4n5:37340:38003 [1] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37342:38001 [3] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37341:38002 [2] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37339:38000 [0] NCCL INFO Channel 01/12 : 0 1 3 2 +r6i4n5:37339:38000 [0] NCCL INFO Channel 02/12 : 0 2 3 1 +r6i4n5:37339:38000 [0] NCCL INFO Channel 03/12 : 0 2 1 3 +r6i4n5:37339:38000 [0] NCCL INFO Channel 04/12 : 0 3 1 2 +r6i4n5:37340:38003 [1] NCCL INFO Trees [0] 2/-1/-1->1->0|0->1->2/-1/-1 [1] 3/-1/-1->1->-1|-1->1->3/-1/-1 [2] -1/-1/-1->1->3|3->1->-1/-1/-1 [3] 0/-1/-1->1->2|2->1->0/-1/-1 [4] 2/-1/-1->1->0|0->1->2/-1/-1 [5] 3/-1/-1->1->-1|-1->1->3/-1/-1 [6] 2/-1/-1->1->0|0->1->2/-1/-1 [7] 3/-1/-1->1->-1|-1->1->3/-1/-1 [8] -1/-1/-1->1->3|3->1->-1/-1/-1 [9] 0/-1/-1->1->2|2->1->0/-1/-1 [10] 2/-1/-1->1->0|0->1->2/-1/-1 [11] 3/-1/-1->1->-1|-1->1->3/-1/-1 +r6i4n5:37342:38001 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 0/-1/-1->3->1|1->3->0/-1/-1 [2] 1/-1/-1->3->0|0->3->1/-1/-1 [3] 2/-1/-1->3->-1|-1->3->2/-1/-1 [4] -1/-1/-1->3->2|2->3->-1/-1/-1 [5] 0/-1/-1->3->1|1->3->0/-1/-1 [6] -1/-1/-1->3->2|2->3->-1/-1/-1 [7] 0/-1/-1->3->1|1->3->0/-1/-1 [8] 1/-1/-1->3->0|0->3->1/-1/-1 [9] 2/-1/-1->3->-1|-1->3->2/-1/-1 [10] -1/-1/-1->3->2|2->3->-1/-1/-1 [11] 0/-1/-1->3->1|1->3->0/-1/-1 +r6i4n5:37341:38002 [2] NCCL INFO Trees [0] 
3/-1/-1->2->1|1->2->3/-1/-1 [1] -1/-1/-1->2->0|0->2->-1/-1/-1 [2] 0/-1/-1->2->-1|-1->2->0/-1/-1 [3] 1/-1/-1->2->3|3->2->1/-1/-1 [4] 3/-1/-1->2->1|1->2->3/-1/-1 [5] -1/-1/-1->2->0|0->2->-1/-1/-1 [6] 3/-1/-1->2->1|1->2->3/-1/-1 [7] -1/-1/-1->2->0|0->2->-1/-1/-1 [8] 0/-1/-1->2->-1|-1->2->0/-1/-1 [9] 1/-1/-1->2->3|3->2->1/-1/-1 [10] 3/-1/-1->2->1|1->2->3/-1/-1 [11] -1/-1/-1->2->0|0->2->-1/-1/-1 +r6i4n5:37339:38000 [0] NCCL INFO Channel 05/12 : 0 3 2 1 +r6i4n5:37339:38000 [0] NCCL INFO Channel 06/12 : 0 1 2 3 +r6i4n5:37340:38003 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37342:38001 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37341:38002 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r6i4n5:37339:38000 [0] NCCL INFO Channel 07/12 : 0 1 3 2 +r6i4n5:37339:38000 [0] NCCL INFO Channel 08/12 : 0 2 3 1 +r6i4n5:37339:38000 [0] NCCL INFO Channel 09/12 : 0 2 1 3 +r6i4n5:37339:38000 [0] NCCL INFO Channel 10/12 : 0 3 1 2 +r6i4n5:37339:38000 [0] NCCL INFO Channel 11/12 : 0 3 2 1 +r6i4n5:37339:38000 [0] NCCL INFO threadThresholds 8/8/64 | 32/8/64 | 8/8/64 +r6i4n5:37339:38000 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1|-1->0->1/-1/-1 [1] 2/-1/-1->0->3|3->0->2/-1/-1 [2] 3/-1/-1->0->2|2->0->3/-1/-1 [3] -1/-1/-1->0->1|1->0->-1/-1/-1 [4] 1/-1/-1->0->-1|-1->0->1/-1/-1 [5] 2/-1/-1->0->3|3->0->2/-1/-1 [6] 1/-1/-1->0->-1|-1->0->1/-1/-1 [7] 2/-1/-1->0->3|3->0->2/-1/-1 [8] 3/-1/-1->0->2|2->0->3/-1/-1 [9] -1/-1/-1->0->1|1->0->-1/-1/-1 [10] 1/-1/-1->0->-1|-1->0->1/-1/-1 [11] 2/-1/-1->0->3|3->0->2/-1/-1 +r6i4n5:37339:38000 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37342:38001 [3] NCCL INFO Channel 00 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 00 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[88000] 
via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 00 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 00 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 01 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 01 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 01 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 01 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 01 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 01 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 02 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 02 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 02 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 02 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 02 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 02 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 02 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 02 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 03 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 03 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 03 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 03 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 03 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 03 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 03 : 1[1c000] -> 2[88000] 
via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 03 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 03 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 04 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 04 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 04 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 04 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 04 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 04 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 04 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 04 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 04 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 05 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 05 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 05 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 05 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 05 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 05 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 05 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 05 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 05 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 06 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 06 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 06 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 06 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 06 : 3[8a000] -> 2[88000] 
via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 06 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 06 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 07 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 07 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 07 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 07 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 07 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 07 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 07 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 07 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 08 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 08 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 08 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 08 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 08 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 08 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 08 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 09 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 08 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 09 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 09 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 09 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 09 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 09 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 09 : 2[88000] -> 3[8a000] 
via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 09 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 09 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 10 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 10 : 1[1c000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 10 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 10 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 10 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 10 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 10 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 10 : 0[1a000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 10 : 2[88000] -> 3[8a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 11 : 1[1c000] -> 0[1a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 11 : 0[1a000] -> 3[8a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 11 : 3[8a000] -> 2[88000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 11 : 2[88000] -> 1[1c000] via P2P/IPC +r6i4n5:37341:38002 [2] NCCL INFO Channel 11 : 2[88000] -> 0[1a000] via P2P/IPC +r6i4n5:37342:38001 [3] NCCL INFO Channel 11 : 3[8a000] -> 1[1c000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO Channel 11 : 1[1c000] -> 3[8a000] via P2P/IPC +r6i4n5:37339:38000 [0] NCCL INFO Channel 11 : 0[1a000] -> 2[88000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37342:38001 [3] NCCL INFO Channel 11 : 3[8a000] -> 0[1a000] via P2P/IPC +r6i4n5:37340:38003 [1] NCCL INFO comm 0x15401051cf60 rank 1 nranks 4 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37341:38002 [2] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37341:38002 [2] NCCL INFO comm 0x14a168525010 rank 2 nranks 4 cudaDev 2 busId 88000 - 
Init COMPLETE +r6i4n5:37342:38001 [3] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37342:38001 [3] NCCL INFO comm 0x15289c519d90 rank 3 nranks 4 cudaDev 3 busId 8a000 - Init COMPLETE +r6i4n5:37339:38000 [0] NCCL INFO 12 coll channels, 16 p2p channels, 4 p2p channels per peer +r6i4n5:37339:38000 [0] NCCL INFO comm 0x147a84001060 rank 0 nranks 4 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +[2021-05-27 19:51:55,061] [INFO] [logging.py:60:log_dist] [Rank 0] step=1, skipped=0, lr=[1.875e-05, 1.875e-05], mom=[(0.9, 0.999), (0.9, 0.999)] +r7i1n3:4941:5144 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i1n3:4941:5144 [0] NCCL INFO Trees [0] -1/-1/-1->1->2|2->1->-1/-1/-1 [1] 9/-1/-1->1->-1|-1->1->9/-1/-1 +r7i1n3:4941:5144 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37339:38051 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r6i4n5:37339:38051 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r7i4n5:46158:46356 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i4n5:46158:46356 [0] NCCL INFO Trees [0] -1/-1/-1->5->6|6->5->-1/-1/-1 [1] 3/7/-1->5->9|9->5->3/7/-1 +r7i4n4:1766:2011 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i4n4:1766:2011 [0] NCCL INFO Trees [0] 2/6/-1->4->8|8->4->2/6/-1 [1] -1/-1/-1->4->3|3->4->-1/-1/-1 +r7i4n4:1766:2011 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i5n3:79994:80190 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i5n3:79994:80190 [0] NCCL INFO Trees [0] 5/7/-1->6->4|4->6->5/7/-1 [1] -1/-1/-1->6->7|7->6->-1/-1/-1 +r7i5n3:79994:80190 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i2n6:892:1130 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i2n6:892:1130 [0] NCCL INFO Trees [0] 1/3/-1->2->4|4->2->1/3/-1 [1] -1/-1/-1->2->3|3->2->-1/-1/-1 +r7i2n6:892:1130 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i6n8:29152:29340 [0] NCCL 
INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i6n8:29152:29340 [0] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 6/8/-1->7->5|5->7->6/8/-1 +r7i6n8:29152:29340 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n7:8760:9309 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n7:8760:9309 [0] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 14/0/-1->15->13|13->15->14/0/-1 +r9i1n7:8760:9309 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i3n0:38598:38854 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i3n0:38598:38854 [0] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 2/4/-1->3->5|5->3->2/4/-1 +r7i3n0:38598:38854 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n0:55673:55899 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n0:55673:55899 [0] NCCL INFO Trees [0] 4/12/-1->8->0|0->8->4/12/-1 [1] -1/-1/-1->8->7|7->8->-1/-1/-1 +r7i7n0:55673:55899 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n6:58312:58552 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n6:58312:58552 [0] NCCL INFO Trees [0] 13/15/-1->14->12|12->14->13/15/-1 [1] -1/-1/-1->14->15|15->14->-1/-1/-1 +r6i4n5:37339:38051 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37339:38051 [0] NCCL INFO Trees [0] 8/-1/-1->0->-1|-1->0->8/-1/-1 [1] -1/-1/-1->0->15|15->0->-1/-1/-1 +r7i7n1:68404:68632 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n1:68404:68632 [0] NCCL INFO Trees [0] -1/-1/-1->9->10|10->9->-1/-1/-1 [1] 5/13/-1->9->1|1->9->5/13/-1 +r7i7n1:68404:68632 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r6i4n5:37339:38051 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i4n5:46158:46356 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40813:41076 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n5:40813:41076 [0] NCCL INFO Trees [0] -1/-1/-1->13->14|14->13->-1/-1/-1 [1] 11/15/-1->13->9|9->13->11/15/-1 +r9i1n4:24818:25013 [0] NCCL INFO 
threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n4:24818:25013 [0] NCCL INFO Trees [0] 10/14/-1->12->8|8->12->10/14/-1 [1] -1/-1/-1->12->11|11->12->-1/-1/-1 +r9i1n4:24818:25013 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57608:57832 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n2:57608:57832 [0] NCCL INFO Trees [0] 9/11/-1->10->12|12->10->9/11/-1 [1] -1/-1/-1->10->11|11->10->-1/-1/-1 +r9i1n6:58312:58552 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r8i0n3:57932:58124 [0] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r8i0n3:57932:58124 [0] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 10/12/-1->11->13|13->11->10/12/-1 +r8i0n3:57932:58124 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r9i1n5:40813:41076 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i7n2:57608:57832 [0] NCCL INFO Setting affinity for GPU 0 to 0fffff +r7i1n3:4943:5143 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i1n3:4943:5143 [2] NCCL INFO Trees [0] -1/-1/-1->1->2|2->1->-1/-1/-1 [1] 9/-1/-1->1->-1|-1->1->9/-1/-1 +r7i1n3:4943:5143 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:894:1132 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i2n6:894:1132 [2] NCCL INFO Trees [0] 1/3/-1->2->4|4->2->1/3/-1 [1] -1/-1/-1->2->3|3->2->-1/-1/-1 +r7i2n6:894:1132 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i3n0:38600:38856 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i3n0:38600:38856 [2] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 2/4/-1->3->5|5->3->2/4/-1 +r7i3n0:38600:38856 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40815:41077 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37341:38054 [2] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r9i1n7:8762:9310 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r8i0n3:57934:58127 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 
+r8i0n3:57934:58127 [2] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 10/12/-1->11->13|13->11->10/12/-1 +r7i5n3:79996:80192 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n2:57610:57834 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i6n8:29154:29341 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n5:40815:41077 [2] NCCL INFO Trees [0] -1/-1/-1->13->14|14->13->-1/-1/-1 [1] 11/15/-1->13->9|9->13->11/15/-1 +r9i1n5:40815:41077 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n0:55675:55901 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n1:68406:68633 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n1:68406:68633 [2] NCCL INFO Trees [0] -1/-1/-1->9->10|10->9->-1/-1/-1 [1] 5/13/-1->9->1|1->9->5/13/-1 +r7i4n4:1768:2013 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37341:38054 [2] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r9i1n4:24820:25012 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n4:24820:25012 [2] NCCL INFO Trees [0] 10/14/-1->12->8|8->12->10/14/-1 [1] -1/-1/-1->12->11|11->12->-1/-1/-1 +r9i1n4:24820:25012 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n6:58314:58551 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n6:58314:58551 [2] NCCL INFO Trees [0] 13/15/-1->14->12|12->14->13/15/-1 [1] -1/-1/-1->14->15|15->14->-1/-1/-1 +r9i1n6:58314:58551 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n5:46160:46357 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i4n5:46160:46357 [2] NCCL INFO Trees [0] -1/-1/-1->5->6|6->5->-1/-1/-1 [1] 3/7/-1->5->9|9->5->3/7/-1 +r7i4n5:46160:46357 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n7:8762:9310 [2] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 14/0/-1->15->13|13->15->14/0/-1 +r8i0n3:57934:58127 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i5n3:79996:80192 [2] NCCL INFO Trees [0] 
5/7/-1->6->4|4->6->5/7/-1 [1] -1/-1/-1->6->7|7->6->-1/-1/-1 +r9i1n7:8762:9310 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i1n3:4942:5142 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i1n3:4942:5142 [1] NCCL INFO Trees [0] -1/-1/-1->1->2|2->1->-1/-1/-1 [1] 9/-1/-1->1->-1|-1->1->9/-1/-1 +r7i5n3:79996:80192 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n2:57610:57834 [2] NCCL INFO Trees [0] 9/11/-1->10->12|12->10->9/11/-1 [1] -1/-1/-1->10->11|11->10->-1/-1/-1 +r7i6n8:29154:29341 [2] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 6/8/-1->7->5|5->7->6/8/-1 +r7i7n2:57610:57834 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i6n8:29154:29341 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r9i1n5:40814:41075 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n0:55675:55901 [2] NCCL INFO Trees [0] 4/12/-1->8->0|0->8->4/12/-1 [1] -1/-1/-1->8->7|7->8->-1/-1/-1 +r7i7n0:55675:55901 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1768:2013 [2] NCCL INFO Trees [0] 2/6/-1->4->8|8->4->2/6/-1 [1] -1/-1/-1->4->3|3->4->-1/-1/-1 +r7i7n1:68406:68633 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i2n6:893:1131 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i3n0:38599:38855 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i3n0:38599:38855 [1] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 2/4/-1->3->5|5->3->2/4/-1 +r7i4n4:1768:2013 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i7n1:68405:68634 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n4:24819:25014 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37341:38054 [2] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i4n4:1767:2012 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n6:58313:58550 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n1:68405:68634 [1] NCCL INFO Trees [0] -1/-1/-1->9->10|10->9->-1/-1/-1 [1] 
5/13/-1->9->1|1->9->5/13/-1 +r7i7n1:68405:68634 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24819:25014 [1] NCCL INFO Trees [0] 10/14/-1->12->8|8->12->10/14/-1 [1] -1/-1/-1->12->11|11->12->-1/-1/-1 +r9i1n4:24819:25014 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37341:38054 [2] NCCL INFO Trees [0] 8/-1/-1->0->-1|-1->0->8/-1/-1 [1] -1/-1/-1->0->15|15->0->-1/-1/-1 +r6i4n5:37341:38054 [2] NCCL INFO Setting affinity for GPU 2 to ff,fff00000 +r7i4n4:1767:2012 [1] NCCL INFO Trees [0] 2/6/-1->4->8|8->4->2/6/-1 [1] -1/-1/-1->4->3|3->4->-1/-1/-1 +r9i1n6:58313:58550 [1] NCCL INFO Trees [0] 13/15/-1->14->12|12->14->13/15/-1 [1] -1/-1/-1->14->15|15->14->-1/-1/-1 +r9i1n6:58313:58550 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r6i4n5:37340:38053 [1] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r7i4n5:46159:46358 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i4n5:46159:46358 [1] NCCL INFO Trees [0] -1/-1/-1->5->6|6->5->-1/-1/-1 [1] 3/7/-1->5->9|9->5->3/7/-1 +r7i4n4:1767:2012 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8761:9311 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37340:38053 [1] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r7i4n5:46159:46358 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r8i0n3:57933:58125 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n7:8761:9311 [1] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 14/0/-1->15->13|13->15->14/0/-1 +r7i1n3:4942:5142 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i4n5:46161:46359 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r8i0n3:57933:58125 [1] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 10/12/-1->11->13|13->11->10/12/-1 +r8i0n3:57933:58125 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n7:8761:9311 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i1n3:4944:5145 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 
8/8/64 +r7i1n3:4944:5145 [3] NCCL INFO Trees [0] -1/-1/-1->1->2|2->1->-1/-1/-1 [1] 9/-1/-1->1->-1|-1->1->9/-1/-1 +r7i4n5:46161:46359 [3] NCCL INFO Trees [0] -1/-1/-1->5->6|6->5->-1/-1/-1 [1] 3/7/-1->5->9|9->5->3/7/-1 +r8i0n3:57935:58126 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i5n3:79995:80191 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i5n3:79995:80191 [1] NCCL INFO Trees [0] 5/7/-1->6->4|4->6->5/7/-1 [1] -1/-1/-1->6->7|7->6->-1/-1/-1 +r9i1n7:8763:9312 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i1n3:4944:5145 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n5:46161:46359 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r8i0n3:57935:58126 [3] NCCL INFO Trees [0] -1/-1/-1->11->10|10->11->-1/-1/-1 [1] 10/12/-1->11->13|13->11->10/12/-1 +r8i0n3:57935:58126 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i5n3:79995:80191 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57609:57833 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n7:8763:9312 [3] NCCL INFO Trees [0] -1/-1/-1->15->14|14->15->-1/-1/-1 [1] 14/0/-1->15->13|13->15->14/0/-1 +r7i5n3:79997:80193 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i5n3:79997:80193 [3] NCCL INFO Trees [0] 5/7/-1->6->4|4->6->5/7/-1 [1] -1/-1/-1->6->7|7->6->-1/-1/-1 +r7i7n2:57609:57833 [1] NCCL INFO Trees [0] 9/11/-1->10->12|12->10->9/11/-1 [1] -1/-1/-1->10->11|11->10->-1/-1/-1 +r9i1n7:8763:9312 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29153:29342 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i5n3:79997:80193 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n2:57609:57833 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i6n8:29153:29342 [1] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 6/8/-1->7->5|5->7->6/8/-1 +r7i7n2:57611:57835 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i6n8:29153:29342 [1] NCCL INFO Setting affinity for GPU 1 to 
0fffff +r7i7n0:55674:55900 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n2:57611:57835 [3] NCCL INFO Trees [0] 9/11/-1->10->12|12->10->9/11/-1 [1] -1/-1/-1->10->11|11->10->-1/-1/-1 +r7i6n8:29155:29343 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n0:55674:55900 [1] NCCL INFO Trees [0] 4/12/-1->8->0|0->8->4/12/-1 [1] -1/-1/-1->8->7|7->8->-1/-1/-1 +r7i7n0:55674:55900 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n2:57611:57835 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i6n8:29155:29343 [3] NCCL INFO Trees [0] -1/-1/-1->7->6|6->7->-1/-1/-1 [1] 6/8/-1->7->5|5->7->6/8/-1 +r9i1n5:40814:41075 [1] NCCL INFO Trees [0] -1/-1/-1->13->14|14->13->-1/-1/-1 [1] 11/15/-1->13->9|9->13->11/15/-1 +r7i7n0:55676:55902 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i6n8:29155:29343 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40814:41075 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i7n0:55676:55902 [3] NCCL INFO Trees [0] 4/12/-1->8->0|0->8->4/12/-1 [1] -1/-1/-1->8->7|7->8->-1/-1/-1 +r7i2n6:893:1131 [1] NCCL INFO Trees [0] 1/3/-1->2->4|4->2->1/3/-1 [1] -1/-1/-1->2->3|3->2->-1/-1/-1 +r7i3n0:38599:38855 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n5:40816:41078 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n0:55676:55902 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i2n6:893:1131 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r7i2n6:895:1133 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i2n6:895:1133 [3] NCCL INFO Trees [0] 1/3/-1->2->4|4->2->1/3/-1 [1] -1/-1/-1->2->3|3->2->-1/-1/-1 +r7i3n0:38601:38857 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i3n0:38601:38857 [3] NCCL INFO Trees [0] -1/-1/-1->3->2|2->3->-1/-1/-1 [1] 2/4/-1->3->5|5->3->2/4/-1 +r9i1n5:40816:41078 [3] NCCL INFO Trees [0] -1/-1/-1->13->14|14->13->-1/-1/-1 [1] 11/15/-1->13->9|9->13->11/15/-1 +r7i7n1:68407:68635 [3] NCCL INFO threadThresholds 
8/8/64 | 128/8/64 | 8/8/64 +r7i2n6:895:1133 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i3n0:38601:38857 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r9i1n5:40816:41078 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1769:2014 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r7i7n1:68407:68635 [3] NCCL INFO Trees [0] -1/-1/-1->9->10|10->9->-1/-1/-1 [1] 5/13/-1->9->1|1->9->5/13/-1 +r7i7n1:68407:68635 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i4n4:1769:2014 [3] NCCL INFO Trees [0] 2/6/-1->4->8|8->4->2/6/-1 [1] -1/-1/-1->4->3|3->4->-1/-1/-1 +r7i4n4:1769:2014 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37340:38053 [1] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37340:38053 [1] NCCL INFO Trees [0] 8/-1/-1->0->-1|-1->0->8/-1/-1 [1] -1/-1/-1->0->15|15->0->-1/-1/-1 +r9i1n4:24821:25015 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37340:38053 [1] NCCL INFO Setting affinity for GPU 1 to 0fffff +r9i1n4:24821:25015 [3] NCCL INFO Trees [0] 10/14/-1->12->8|8->12->10/14/-1 [1] -1/-1/-1->12->11|11->12->-1/-1/-1 +r6i4n5:37342:38056 [3] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r9i1n6:58315:58553 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r9i1n4:24821:25015 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37342:38056 [3] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +r9i1n6:58315:58553 [3] NCCL INFO Trees [0] 13/15/-1->14->12|12->14->13/15/-1 [1] -1/-1/-1->14->15|15->14->-1/-1/-1 +r9i1n6:58315:58553 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r6i4n5:37342:38056 [3] NCCL INFO threadThresholds 8/8/64 | 128/8/64 | 8/8/64 +r6i4n5:37342:38056 [3] NCCL INFO Trees [0] 8/-1/-1->0->-1|-1->0->8/-1/-1 [1] -1/-1/-1->0->15|15->0->-1/-1/-1 +r6i4n5:37342:38056 [3] NCCL INFO Setting affinity for GPU 3 to ff,fff00000 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 7[1c000] -> 8[1c000] 
[receive] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1a000] [receive] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 9[1c000] -> 10[1c000] [receive] via NET/IB/3 +r7i6n8:29152:29340 [0] NCCL INFO Channel 00 : 6[1a000] -> 7[1a000] [receive] via NET/IB/3 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 3[1c000] -> 4[1c000] [receive] via NET/IB/3 +r7i6n8:29153:29342 [1] NCCL INFO Channel 00 : 6[1c000] -> 7[1c000] [receive] via NET/IB/3 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 5[1a000] -> 6[1a000] [receive] via NET/IB/3 +r9i1n7:8760:9309 [0] NCCL INFO Channel 00 : 14[1a000] -> 15[1a000] [receive] via NET/IB/3 +r8i0n3:57932:58124 [0] NCCL INFO Channel 00 : 10[1a000] -> 11[1a000] [receive] via NET/IB/3 +r6i4n5:37340:38053 [1] NCCL INFO Channel 00 : 15[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 1[1a000] -> 2[1a000] [receive] via NET/IB/3 +r9i1n7:8762:9310 [2] NCCL INFO Channel 00 : 14[88000] -> 15[88000] [receive] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1a000] [receive] via NET/IB/3 +r6i4n5:37339:38051 [0] NCCL INFO Channel 00 : 15[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 9[1a000] -> 10[1a000] [receive] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 1[88000] -> 2[88000] [receive] via NET/IB/2 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 7[1a000] -> 8[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 00 : 2[1a000] -> 3[1a000] [receive] via NET/IB/3 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 11[1a000] -> 12[1a000] [receive] via NET/IB/3 +r7i7n1:68406:68633 [2] NCCL INFO Channel 00 : 8[88000] -> 9[88000] [receive] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 3[1a000] -> 4[1a000] [receive] via NET/IB/3 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[1c000] 
[receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 00 : 14[1c000] -> 15[1c000] [receive] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 5[88000] -> 6[88000] [receive] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[1c000] [receive] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1a000] [receive] via NET/IB/3 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 9[88000] -> 10[88000] [receive] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 7[88000] -> 8[88000] [receive] via NET/IB/2 +r9i1n5:40814:41075 [1] NCCL INFO Channel 00 : 12[1c000] -> 13[1c000] [receive] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 00 : 10[88000] -> 11[88000] [receive] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 00 : 2[88000] -> 3[88000] [receive] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 11[88000] -> 12[88000] [receive] via NET/IB/2 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[1c000] [receive] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 13[1a000] -> 14[1a000] [receive] via NET/IB/3 +r7i4n5:46160:46357 [2] NCCL INFO Channel 00 : 4[88000] -> 5[88000] [receive] via NET/IB/2 +r7i7n1:68405:68634 [1] NCCL INFO Channel 00 : 8[1c000] -> 9[1c000] [receive] via NET/IB/3 +r7i1n3:4943:5143 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 3[88000] -> 4[88000] [receive] via NET/IB/2 +r6i4n5:37341:38054 [2] NCCL INFO Channel 00 : 15[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n5:40815:41077 [2] NCCL INFO Channel 00 : 12[88000] -> 13[88000] [receive] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 11[1c000] -> 12[1c000] [receive] via NET/IB/3 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 13[88000] -> 14[88000] [receive] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 00 : 2[1c000] -> 3[1c000] [receive] via NET/IB/3 +r7i1n3:4944:5145 [3] NCCL INFO Channel 00 : 
0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r8i0n3:57933:58125 [1] NCCL INFO Channel 00 : 10[1c000] -> 11[1c000] [receive] via NET/IB/3 +r7i3n0:38601:38857 [3] NCCL INFO Channel 00 : 2[8a000] -> 3[8a000] [receive] via NET/IB/2 +r7i4n5:46159:46358 [1] NCCL INFO Channel 00 : 4[1c000] -> 5[1c000] [receive] via NET/IB/3 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[8a000] [receive] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 00 : 4[8a000] -> 5[8a000] [receive] via NET/IB/2 +r8i0n3:57935:58126 [3] NCCL INFO Channel 00 : 10[8a000] -> 11[8a000] [receive] via NET/IB/2 +r7i1n3:4942:5142 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 1[8a000] -> 2[8a000] [receive] via NET/IB/2 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 13[8a000] -> 14[8a000] [receive] via NET/IB/2 +r7i7n1:68407:68635 [3] NCCL INFO Channel 00 : 8[8a000] -> 9[8a000] [receive] via NET/IB/2 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 5[8a000] -> 6[8a000] [receive] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 00 : 14[8a000] -> 15[8a000] [receive] via NET/IB/2 +r7i6n8:29154:29341 [2] NCCL INFO Channel 00 : 6[88000] -> 7[88000] [receive] via NET/IB/2 +r9i1n5:40816:41078 [3] NCCL INFO Channel 00 : 12[8a000] -> 13[8a000] [receive] via NET/IB/2 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[8a000] [receive] via NET/IB/2 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 8[1c000] -> 9[1c000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 10[1c000] -> 11[1c000] [send] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 00 : 13[1a000] -> 14[1a000] [send] via NET/IB/3 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 4[1c000] -> 5[1c000] [send] via NET/IB/3 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 6[1a000] -> 7[1a000] [send] via NET/IB/3 +r7i1n3:4941:5144 [0] NCCL INFO Channel 00 : 1[1a000] -> 2[1a000] [send] via NET/IB/3 +r7i6n8:29152:29340 [0] NCCL INFO Channel 00 : 7[1a000] -> 
8[1a000] [send] via NET/IB/3 +r8i0n3:57932:58124 [0] NCCL INFO Channel 00 : 11[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 2[1a000] -> 3[1a000] [send] via NET/IB/3 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 12[1a000] -> 13[1a000] [send] via NET/IB/3 +r7i6n8:29153:29342 [1] NCCL INFO Channel 00 : 7[1c000] -> 8[1c000] [send] via NET/IB/3 +r9i1n7:8760:9309 [0] NCCL INFO Channel 00 : 15[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 4[1a000] -> 5[1a000] [send] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 00 : 11[88000] -> 12[88000] [send] via NET/IB/2 +r6i4n5:37340:38053 [1] NCCL INFO Channel 00 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 00 : 3[1a000] -> 4[1a000] [send] via NET/IB/3 +r7i6n8:29155:29343 [3] NCCL INFO Channel 00 : 6[8a000] -> 7[8a000] [receive] via NET/IB/2 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 6[1c000] -> 7[1c000] [send] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 4[88000] -> 5[88000] [send] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 00 : 9[88000] -> 10[88000] [send] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 14[1c000] -> 15[1c000] [send] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 00 : 15[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 2[88000] -> 3[88000] [send] via NET/IB/2 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 2[1c000] -> 3[1c000] [send] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 6[88000] -> 7[88000] [send] via NET/IB/2 +r9i1n5:40814:41075 [1] NCCL INFO Channel 00 : 13[1c000] -> 14[1c000] [send] via NET/IB/3 +r9i1n7:8762:9310 [2] NCCL INFO Channel 00 : 15[88000] -> 0[88000] [send] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 00 : 9[1a000] -> 10[1a000] [send] via NET/IB/3 +r7i3n0:38600:38856 [2] NCCL INFO Channel 00 : 3[88000] -> 4[88000] [send] via NET/IB/2 +r7i7n1:68405:68634 [1] NCCL INFO 
Channel 00 : 9[1c000] -> 10[1c000] [send] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 14[1a000] -> 15[1a000] [send] via NET/IB/3 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 12[88000] -> 13[88000] [send] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO Channel 00 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r7i4n5:46160:46357 [2] NCCL INFO Channel 00 : 5[88000] -> 6[88000] [send] via NET/IB/2 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 14[88000] -> 15[88000] [send] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 00 : 3[1c000] -> 4[1c000] [send] via NET/IB/3 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 12[1c000] -> 13[1c000] [send] via NET/IB/3 +r8i0n3:57933:58125 [1] NCCL INFO Channel 00 : 11[1c000] -> 12[1c000] [send] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 00 : 13[88000] -> 14[88000] [send] via NET/IB/2 +r7i3n0:38601:38857 [3] NCCL INFO Channel 00 : 3[8a000] -> 4[8a000] [send] via NET/IB/2 +r7i4n5:46158:46356 [0] NCCL INFO Channel 00 : 5[1a000] -> 6[1a000] [send] via NET/IB/3 +r6i4n5:37341:38054 [2] NCCL INFO Channel 00 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 8[1a000] -> 9[1a000] [send] via NET/IB/3 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 12[8a000] -> 13[8a000] [send] via NET/IB/2 +r6i4n5:37342:38056 [3] NCCL INFO Channel 00 : 15[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 10[1a000] -> 11[1a000] [send] via NET/IB/3 +r8i0n3:57935:58126 [3] NCCL INFO Channel 00 : 11[8a000] -> 12[8a000] [send] via NET/IB/2 +r7i4n5:46159:46358 [1] NCCL INFO Channel 00 : 5[1c000] -> 6[1c000] [send] via NET/IB/3 +r7i4n5:46161:46359 [3] NCCL INFO Channel 00 : 5[8a000] -> 6[8a000] [send] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 8[88000] -> 9[88000] [send] via NET/IB/2 +r7i1n3:4944:5145 [3] NCCL INFO Channel 00 : 1[8a000] -> 2[8a000] [send] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 10[88000] -> 11[88000] [send] 
via NET/IB/2 +r7i1n3:4943:5143 [2] NCCL INFO Channel 00 : 1[88000] -> 2[88000] [send] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[8a000] [receive] via NET/IB/2 +r7i1n3:4942:5142 [1] NCCL INFO Channel 00 : 1[1c000] -> 2[1c000] [send] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 9[8a000] -> 10[8a000] [receive] via NET/IB/2 +r7i7n1:68407:68635 [3] NCCL INFO Channel 00 : 9[8a000] -> 10[8a000] [send] via NET/IB/2 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 2[8a000] -> 3[8a000] [send] via NET/IB/2 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 6[8a000] -> 7[8a000] [send] via NET/IB/2 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 14[8a000] -> 15[8a000] [send] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 00 : 15[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i6n8:29154:29341 [2] NCCL INFO Channel 00 : 7[88000] -> 8[88000] [send] via NET/IB/2 +r9i1n5:40816:41078 [3] NCCL INFO Channel 00 : 13[8a000] -> 14[8a000] [send] via NET/IB/2 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 4[8a000] -> 5[8a000] [send] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 00 : 7[8a000] -> 8[8a000] [send] via NET/IB/2 +r6i4n5:37342:38056 [3] NCCL INFO Channel 00 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 8[8a000] -> 9[8a000] [send] via NET/IB/2 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 10[8a000] -> 11[8a000] [send] via NET/IB/2 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 4[1c000] -> 8[1c000] [receive] via NET/IB/2 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 11[1c000] -> 10[1c000] [receive] via NET/IB/2 +r7i6n8:29152:29340 [0] NCCL INFO Channel 00 : 7[1a000] -> 6[1a000] [send] via NET/IB/2 +r7i6n8:29153:29342 [1] NCCL INFO Channel 00 : 7[1c000] -> 6[1c000] [send] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO Channel 00 : 2[1a000] -> 1[1a000] [receive] via NET/IB/2 +r9i1n5:40814:41075 [1] NCCL INFO Channel 00 : 14[1c000] -> 13[1c000] [receive] via NET/IB/2 +r9i1n5:40815:41077 [2] NCCL 
INFO Channel 00 : 14[88000] -> 13[88000] [receive] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 00 : 14[1a000] -> 13[1a000] [receive] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 2[88000] -> 4[88000] [receive] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [receive] via NET/IB/2 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 7[1a000] -> 6[1a000] [receive] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 00 : 10[88000] -> 9[88000] [receive] via NET/IB/3 +r9i1n7:8760:9309 [0] NCCL INFO Channel 00 : 15[1a000] -> 14[1a000] [send] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 2[1a000] -> 4[1a000] [receive] via NET/IB/2 +r8i0n3:57934:58127 [2] NCCL INFO Channel 00 : 11[88000] -> 10[88000] [send] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 00 : 3[1a000] -> 2[1a000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 10[1a000] -> 12[1a000] [receive] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 2[1c000] -> 4[1c000] [receive] via NET/IB/2 +r9i1n7:8762:9310 [2] NCCL INFO Channel 00 : 15[88000] -> 14[88000] [send] via NET/IB/3 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 7[1c000] -> 6[1c000] [receive] via NET/IB/2 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 7[88000] -> 6[88000] [receive] via NET/IB/3 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 11[88000] -> 10[88000] [receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 00 : 15[1c000] -> 14[1c000] [send] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO Channel 00 : 11[1a000] -> 10[1a000] [send] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 00 : 10[1a000] -> 9[1a000] [receive] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 10[1c000] -> 12[1c000] [receive] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 00 : 3[88000] -> 2[88000] [send] via NET/IB/3 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 10[88000] -> 12[88000] [receive] via NET/IB/3 +r8i0n3:57933:58125 [1] NCCL INFO 
Channel 00 : 11[1c000] -> 10[1c000] [send] via NET/IB/2 +r7i7n1:68405:68634 [1] NCCL INFO Channel 00 : 10[1c000] -> 9[1c000] [receive] via NET/IB/2 +r7i1n3:4943:5143 [2] NCCL INFO Channel 00 : 2[88000] -> 1[88000] [receive] via NET/IB/3 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 11[1a000] -> 10[1a000] [receive] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 10[8a000] -> 12[8a000] [receive] via NET/IB/3 +r7i4n5:46160:46357 [2] NCCL INFO Channel 00 : 6[88000] -> 5[88000] [receive] via NET/IB/3 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[8a000] [receive] via NET/IB/3 +r6i4n5:37339:38051 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [receive] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[8a000] [send] via NET/IB/3 +r7i1n3:4942:5142 [1] NCCL INFO Channel 00 : 2[1c000] -> 1[1c000] [receive] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 15[1c000] -> 14[1c000] [receive] via NET/IB/2 +r7i3n0:38601:38857 [3] NCCL INFO Channel 00 : 3[8a000] -> 2[8a000] [send] via NET/IB/3 +r6i4n5:37341:38054 [2] NCCL INFO Channel 00 : 8[88000] -> 0[88000] [receive] via NET/IB/3 +r7i3n0:38599:38855 [1] NCCL INFO Channel 00 : 3[1c000] -> 2[1c000] [send] via NET/IB/2 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 3[88000] -> 2[88000] [receive] via NET/IB/3 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 15[88000] -> 14[88000] [receive] via NET/IB/3 +r7i4n5:46159:46358 [1] NCCL INFO Channel 00 : 6[1c000] -> 5[1c000] [receive] via NET/IB/2 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 3[1a000] -> 2[1a000] [receive] via NET/IB/2 +r6i4n5:37340:38053 [1] NCCL INFO Channel 00 : 8[1c000] -> 0[1c000] [receive] via NET/IB/2 +r7i4n5:46158:46356 [0] NCCL INFO Channel 00 : 6[1a000] -> 5[1a000] [receive] via NET/IB/2 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 15[1a000] -> 14[1a000] [receive] via NET/IB/2 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 3[1c000] -> 2[1c000] [receive] via NET/IB/2 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 
3[8a000] -> 2[8a000] [receive] via NET/IB/3 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 4[88000] -> 8[88000] [receive] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 15[8a000] -> 14[8a000] [receive] via NET/IB/3 +r9i1n5:40816:41078 [3] NCCL INFO Channel 00 : 14[8a000] -> 13[8a000] [receive] via NET/IB/3 +r7i4n5:46161:46359 [3] NCCL INFO Channel 00 : 6[8a000] -> 5[8a000] [receive] via NET/IB/3 +r7i6n8:29154:29341 [2] NCCL INFO Channel 00 : 7[88000] -> 6[88000] [send] via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 2[8a000] -> 4[8a000] [receive] via NET/IB/3 +r7i7n1:68407:68635 [3] NCCL INFO Channel 00 : 10[8a000] -> 9[8a000] [receive] via NET/IB/3 +r7i1n3:4944:5145 [3] NCCL INFO Channel 00 : 2[8a000] -> 1[8a000] [receive] via NET/IB/3 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 12[1c000] -> 8[1c000] [receive] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 00 : 7[8a000] -> 6[8a000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 10[1c000] -> 12[1c000] [send] via NET/IB/2 +r6i4n5:37342:38056 [3] NCCL INFO Channel 00 : 8[8a000] -> 0[8a000] [receive] via NET/IB/3 +r8i0n3:57935:58126 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[8a000] [send] via NET/IB/3 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 4[8a000] -> 8[8a000] [receive] via NET/IB/3 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 14[1a000] -> 12[1a000] [receive] via NET/IB/2 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [receive] via NET/IB/2 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 6[1a000] -> 4[1a000] [send] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 6[88000] -> 4[88000] [receive] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 11[8a000] -> 10[8a000] [receive] via NET/IB/3 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 6[1a000] -> 4[1a000] [receive] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 6[1c000] -> 4[1c000] [receive] via NET/IB/2 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 
6[1c000] -> 4[1c000] [send] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 14[1c000] -> 12[1c000] [send] via NET/IB/2 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 6[88000] -> 4[88000] [send] via NET/IB/3 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 10[1a000] -> 12[1a000] [send] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 14[1c000] -> 12[1c000] [receive] via NET/IB/2 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 6[8a000] -> 4[8a000] [send] via NET/IB/3 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 10[88000] -> 12[88000] [send] via NET/IB/3 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 14[88000] -> 12[88000] [receive] via NET/IB/3 +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 6[1c000] -> 7[1c000] [receive] via NET/IB/3 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 14[8a000] -> 12[8a000] [receive] via NET/IB/3 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 12[88000] -> 8[88000] [receive] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 2[88000] -> 4[88000] [send] via NET/IB/3 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 14[88000] -> 12[88000] [send] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 10[88000] -> 11[88000] [receive] via NET/IB/2 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 14[1a000] -> 12[1a000] [send] via NET/IB/2 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 2[1a000] -> 4[1a000] [send] via NET/IB/2 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 2[1c000] -> 4[1c000] [send] via NET/IB/2 +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 6[1a000] -> 7[1a000] [receive] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 14[8a000] -> 12[8a000] [send] via NET/IB/3 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 2[8a000] -> 4[8a000] [send] via NET/IB/3 +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 10[1c000] -> 11[1c000] [receive] via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 6[8a000] -> 4[8a000] [receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 14[1c000] -> 15[1c000] 
[receive] via NET/IB/3 +r7i6n8:29154:29341 [2] NCCL INFO Channel 01 : 6[88000] -> 7[88000] [receive] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 2[1c000] -> 3[1c000] [receive] via NET/IB/3 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 10[1a000] -> 11[1a000] [receive] via NET/IB/3 +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 2[8a000] -> 3[8a000] [receive] via NET/IB/2 +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 14[88000] -> 15[88000] [receive] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 14[8a000] -> 15[8a000] [receive] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 2[88000] -> 3[88000] [receive] via NET/IB/2 +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 14[1a000] -> 15[1a000] [receive] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 2[1a000] -> 3[1a000] [receive] via NET/IB/3 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 8[1c000] -> 0[1c000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 12[1a000] -> 8[1a000] [send] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 6[8a000] -> 7[8a000] [receive] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 12[8a000] -> 8[8a000] [receive] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 8[1a000] -> 0[1a000] [send] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 4[88000] -> 8[88000] [send] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 10[8a000] -> 12[8a000] [send] via NET/IB/3 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 4[1a000] -> 8[1a000] [send] via NET/IB/2 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 12[1c000] -> 10[1c000] [receive] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 4[1c000] -> 8[1c000] [send] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 8[88000] -> 0[88000] [send] via NET/IB/3 +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 7[1c000] -> 8[1c000] [send] via NET/IB/3 +r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 10[8a000] -> 11[8a000] [receive] via 
NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 12[1c000] -> 8[1c000] [send] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 12[88000] -> 8[88000] [send] via NET/IB/3 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 12[8a000] -> 8[8a000] [send] via NET/IB/3 +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 7[1a000] -> 8[1a000] [send] via NET/IB/3 +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 11[1c000] -> 12[1c000] [send] via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 4[8a000] -> 8[8a000] [send] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 15[1c000] -> 0[1c000] [send] via NET/IB/3 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 4[1a000] -> 6[1a000] [receive] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 3[1c000] -> 4[1c000] [send] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 11[88000] -> 12[88000] [send] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 11[1a000] -> 12[1a000] [send] via NET/IB/3 +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 3[8a000] -> 4[8a000] [send] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 15[8a000] -> 0[8a000] [send] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 12[88000] -> 10[88000] [receive] via NET/IB/3 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 12[1c000] -> 14[1c000] [receive] via NET/IB/2 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 4[88000] -> 6[88000] [receive] via NET/IB/3 +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 15[1a000] -> 0[1a000] [send] via NET/IB/3 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 4[1c000] -> 6[1c000] [receive] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 12[1a000] -> 10[1a000] [receive] via NET/IB/2 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 4[88000] -> 2[88000] [receive] via NET/IB/3 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 12[88000] -> 14[88000] [receive] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 3[1a000] -> 4[1a000] [send] via NET/IB/3 +r7i6n8:29154:29341 
[2] NCCL INFO Channel 01 : 7[88000] -> 8[88000] [send] via NET/IB/2 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 4[8a000] -> 6[8a000] [receive] via NET/IB/3 +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 15[88000] -> 0[88000] [send] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 3[88000] -> 4[88000] [send] via NET/IB/2 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 12[8a000] -> 14[8a000] [receive] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 12[1a000] -> 14[1a000] [receive] via NET/IB/2 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 4[1c000] -> 2[1c000] [receive] via NET/IB/2 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 4[8a000] -> 2[8a000] [receive] via NET/IB/3 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 4[1a000] -> 2[1a000] [receive] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 7[8a000] -> 8[8a000] [send] via NET/IB/2 +r6i4n5:37340:38053 [1] NCCL INFO Channel 00 : 0[1c000] -> 8[1c000] [send] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 8[8a000] -> 0[8a000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 00 : 10[1c000] -> 9[1c000] [send] via NET/IB/2 +r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 11[8a000] -> 12[8a000] [send] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [send] via NET/IB/2 +r6i4n5:37341:38054 [2] NCCL INFO Channel 00 : 0[88000] -> 8[88000] [send] via NET/IB/3 +r7i5n3:79994:80190 [0] NCCL INFO Channel 00 : 6[1a000] -> 5[1a000] [send] via NET/IB/2 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 12[8a000] -> 10[8a000] [receive] via NET/IB/3 +r9i1n6:58313:58550 [1] NCCL INFO Channel 00 : 14[1c000] -> 13[1c000] [send] via NET/IB/2 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 0[1a000] -> 8[1a000] [receive] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [receive] via NET/IB/2 +r7i2n6:894:1132 [2] NCCL INFO Channel 00 : 2[88000] -> 1[88000] [send] via NET/IB/3 +r7i7n2:57608:57832 [0] NCCL INFO Channel 00 : 10[1a000] 
-> 9[1a000] [send] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 00 : 10[88000] -> 9[88000] [send] via NET/IB/3 +r9i1n6:58314:58551 [2] NCCL INFO Channel 00 : 14[88000] -> 13[88000] [send] via NET/IB/3 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 0[88000] -> 8[88000] [receive] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO Channel 00 : 6[88000] -> 5[88000] [send] via NET/IB/3 +r7i5n3:79995:80191 [1] NCCL INFO Channel 00 : 6[1c000] -> 5[1c000] [send] via NET/IB/2 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 0[1c000] -> 8[1c000] [receive] via NET/IB/2 +r7i5n3:79997:80193 [3] NCCL INFO Channel 00 : 6[8a000] -> 5[8a000] [send] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL INFO Channel 00 : 14[8a000] -> 13[8a000] [send] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 00 : 14[1a000] -> 13[1a000] [send] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 8[88000] -> 4[88000] [receive] via NET/IB/3 +r7i2n6:895:1133 [3] NCCL INFO Channel 00 : 2[8a000] -> 1[8a000] [send] via NET/IB/3 +r7i2n6:893:1131 [1] NCCL INFO Channel 00 : 2[1c000] -> 1[1c000] [send] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [receive] via NET/IB/2 +r7i2n6:892:1130 [0] NCCL INFO Channel 00 : 2[1a000] -> 1[1a000] [send] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 8[1c000] -> 4[1c000] [receive] via NET/IB/2 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 8[8a000] -> 4[8a000] [receive] via NET/IB/3 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 8[88000] -> 12[88000] [receive] via NET/IB/3 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 8[1c000] -> 12[1c000] [receive] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 8[8a000] -> 12[8a000] [receive] via NET/IB/3 +r6i4n5:37342:38056 [3] NCCL INFO Channel 00 : 0[8a000] -> 8[8a000] [send] via NET/IB/3 +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 8[1c000] -> 9[1c000] [receive] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO Channel 00 : 10[8a000] -> 9[8a000] [send] via NET/IB/3 
+r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 0[8a000] -> 8[8a000] [receive] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 8[1a000] -> 4[1a000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 12[1a000] -> 10[1a000] [send] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 8[88000] -> 4[88000] [send] via NET/IB/3 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 8[1c000] -> 4[1c000] [send] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO Channel 01 : 15[1a000] -> 0[1a000] [receive] via NET/IB/3 +r7i1n3:4943:5143 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [receive] via NET/IB/2 +r6i4n5:37341:38054 [2] NCCL INFO Channel 01 : 15[88000] -> 0[88000] [receive] via NET/IB/2 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 12[1c000] -> 13[1c000] [receive] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 4[88000] -> 2[88000] [send] via NET/IB/3 +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 8[88000] -> 9[88000] [receive] via NET/IB/2 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 12[88000] -> 13[88000] [receive] via NET/IB/2 +r6i4n5:37340:38053 [1] NCCL INFO Channel 01 : 15[1c000] -> 0[1c000] [receive] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1a000] [receive] via NET/IB/3 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 4[1a000] -> 2[1a000] [send] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 4[1c000] -> 2[1c000] [send] via NET/IB/2 +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 4[88000] -> 5[88000] [receive] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1a000] [receive] via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 4[8a000] -> 2[8a000] [send] via NET/IB/3 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 4[8a000] -> 5[8a000] [receive] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 12[88000] -> 10[88000] [send] via NET/IB/3 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 4[1c000] -> 5[1c000] [receive] via NET/IB/3 +r9i1n5:40816:41078 [3] 
NCCL INFO Channel 01 : 12[8a000] -> 13[8a000] [receive] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 12[1c000] -> 10[1c000] [send] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 12[8a000] -> 10[8a000] [send] via NET/IB/3 +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 9[1c000] -> 10[1c000] [send] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1a000] [receive] via NET/IB/3 +r7i1n3:4944:5145 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [receive] via NET/IB/2 +r7i1n3:4942:5142 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [receive] via NET/IB/3 +r7i1n3:4941:5144 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [receive] via NET/IB/3 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 8[8a000] -> 4[8a000] [send] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 00 : 8[1a000] -> 12[1a000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 00 : 12[1a000] -> 14[1a000] [send] via NET/IB/2 +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 8[8a000] -> 9[8a000] [receive] via NET/IB/2 +r6i4n5:37342:38056 [3] NCCL INFO Channel 01 : 15[8a000] -> 0[8a000] [receive] via NET/IB/2 +r7i7n0:55674:55900 [1] NCCL INFO Channel 00 : 8[1c000] -> 12[1c000] [send] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 00 : 8[88000] -> 12[88000] [send] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO Channel 00 : 4[88000] -> 6[88000] [send] via NET/IB/3 +r7i1n3:4943:5143 [2] NCCL INFO Channel 01 : 1[88000] -> 2[88000] [send] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO Channel 01 : 0[1a000] -> 1[1a000] [send] via NET/IB/3 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 13[1c000] -> 14[1c000] [send] via NET/IB/3 +r6i4n5:37341:38054 [2] NCCL INFO Channel 01 : 0[88000] -> 1[88000] [send] via NET/IB/2 +r6i4n5:37340:38053 [1] NCCL INFO Channel 01 : 0[1c000] -> 1[1c000] [send] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 5[1a000] -> 6[1a000] [send] via NET/IB/3 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 9[1a000] -> 
10[1a000] [send] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 13[88000] -> 14[88000] [send] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 9[88000] -> 10[88000] [send] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 5[8a000] -> 6[8a000] [send] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 00 : 4[1a000] -> 6[1a000] [send] via NET/IB/2 +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 13[8a000] -> 14[8a000] [send] via NET/IB/2 +r7i4n4:1769:2014 [3] NCCL INFO Channel 00 : 4[8a000] -> 6[8a000] [send] via NET/IB/3 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 5[1c000] -> 6[1c000] [send] via NET/IB/3 +r9i1n4:24820:25012 [2] NCCL INFO Channel 00 : 12[88000] -> 14[88000] [send] via NET/IB/3 +r7i4n4:1767:2012 [1] NCCL INFO Channel 00 : 4[1c000] -> 6[1c000] [send] via NET/IB/2 +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 5[88000] -> 6[88000] [send] via NET/IB/2 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 13[1a000] -> 14[1a000] [send] via NET/IB/3 +r9i1n4:24819:25014 [1] NCCL INFO Channel 00 : 12[1c000] -> 14[1c000] [send] via NET/IB/2 +r7i1n3:4944:5145 [3] NCCL INFO Channel 01 : 1[8a000] -> 2[8a000] [send] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 00 : 12[8a000] -> 14[8a000] [send] via NET/IB/3 +r7i7n0:55676:55902 [3] NCCL INFO Channel 00 : 8[8a000] -> 12[8a000] [send] via NET/IB/3 +r7i1n3:4942:5142 [1] NCCL INFO Channel 01 : 1[1c000] -> 2[1c000] [send] via NET/IB/3 +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 9[8a000] -> 10[8a000] [send] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO Channel 01 : 1[1a000] -> 2[1a000] [send] via NET/IB/3 +r6i4n5:37342:38056 [3] NCCL INFO Channel 01 : 0[8a000] -> 1[8a000] [send] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO Channel 01 : 9[1a000] -> 10[1a000] [receive] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 01 : 13[1a000] -> 14[1a000] [receive] via NET/IB/3 +r6i4n5:37341:38054 [2] NCCL INFO Channel 01 : 0[88000] -> 15[88000] [send] via NET/IB/3 +r7i7n0:55674:55900 
[1] NCCL INFO Channel 01 : 7[1c000] -> 8[1c000] [receive] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO Channel 01 : 1[88000] -> 2[88000] [receive] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 01 : 11[1a000] -> 12[1a000] [receive] via NET/IB/3 +r6i4n5:37340:38053 [1] NCCL INFO Channel 01 : 0[1c000] -> 15[1c000] [send] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO Channel 01 : 0[1a000] -> 15[1a000] [send] via NET/IB/2 +r7i7n0:55673:55899 [0] NCCL INFO Channel 01 : 7[1a000] -> 8[1a000] [receive] via NET/IB/3 +r7i2n6:893:1131 [1] NCCL INFO Channel 01 : 1[1c000] -> 2[1c000] [receive] via NET/IB/3 +r7i2n6:895:1133 [3] NCCL INFO Channel 01 : 1[8a000] -> 2[8a000] [receive] via NET/IB/2 +r7i7n0:55675:55901 [2] NCCL INFO Channel 01 : 7[88000] -> 8[88000] [receive] via NET/IB/2 +r7i7n2:57609:57833 [1] NCCL INFO Channel 01 : 9[1c000] -> 10[1c000] [receive] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO Channel 01 : 9[8a000] -> 10[8a000] [receive] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 01 : 9[88000] -> 10[88000] [receive] via NET/IB/2 +r7i2n6:892:1130 [0] NCCL INFO Channel 01 : 1[1a000] -> 2[1a000] [receive] via NET/IB/3 +r7i5n3:79997:80193 [3] NCCL INFO Channel 01 : 5[8a000] -> 6[8a000] [receive] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 01 : 13[1c000] -> 14[1c000] [receive] via NET/IB/3 +r7i5n3:79995:80191 [1] NCCL INFO Channel 01 : 5[1c000] -> 6[1c000] [receive] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO Channel 01 : 5[88000] -> 6[88000] [receive] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 01 : 11[1c000] -> 12[1c000] [receive] via NET/IB/3 +r7i7n0:55676:55902 [3] NCCL INFO Channel 01 : 7[8a000] -> 8[8a000] [receive] via NET/IB/2 +r7i4n4:1768:2013 [2] NCCL INFO Channel 01 : 3[88000] -> 4[88000] [receive] via NET/IB/2 +r7i5n3:79994:80190 [0] NCCL INFO Channel 01 : 5[1a000] -> 6[1a000] [receive] via NET/IB/3 +r7i4n4:1767:2012 [1] NCCL INFO Channel 01 : 3[1c000] -> 4[1c000] [receive] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL 
INFO Channel 01 : 13[8a000] -> 14[8a000] [receive] via NET/IB/2 +r7i4n4:1769:2014 [3] NCCL INFO Channel 01 : 3[8a000] -> 4[8a000] [receive] via NET/IB/2 +r9i1n6:58314:58551 [2] NCCL INFO Channel 01 : 13[88000] -> 14[88000] [receive] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 01 : 11[8a000] -> 12[8a000] [receive] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO Channel 01 : 11[88000] -> 12[88000] [receive] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 01 : 3[1a000] -> 4[1a000] [receive] via NET/IB/3 +r7i7n2:57608:57832 [0] NCCL INFO Channel 01 : 10[1a000] -> 11[1a000] [send] via NET/IB/3 +r6i4n5:37342:38056 [3] NCCL INFO Channel 01 : 0[8a000] -> 15[8a000] [send] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO Channel 01 : 14[1a000] -> 15[1a000] [send] via NET/IB/3 +r7i7n0:55674:55900 [1] NCCL INFO Channel 01 : 8[1c000] -> 9[1c000] [send] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO Channel 01 : 2[88000] -> 3[88000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 01 : 12[1a000] -> 13[1a000] [send] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 01 : 8[1a000] -> 9[1a000] [send] via NET/IB/3 +r7i2n6:893:1131 [1] NCCL INFO Channel 01 : 2[1c000] -> 3[1c000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 01 : 10[1c000] -> 11[1c000] [send] via NET/IB/3 +r7i7n0:55675:55901 [2] NCCL INFO Channel 01 : 8[88000] -> 9[88000] [send] via NET/IB/2 +r7i2n6:895:1133 [3] NCCL INFO Channel 01 : 2[8a000] -> 3[8a000] [send] via NET/IB/2 +r7i7n2:57611:57835 [3] NCCL INFO Channel 01 : 10[8a000] -> 11[8a000] [send] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 01 : 10[88000] -> 11[88000] [send] via NET/IB/2 +r9i1n6:58313:58550 [1] NCCL INFO Channel 01 : 14[1c000] -> 15[1c000] [send] via NET/IB/3 +r7i1n3:4943:5143 [2] NCCL INFO Channel 01 : 9[88000] -> 1[88000] [receive] via NET/IB/3 +r7i2n6:892:1130 [0] NCCL INFO Channel 01 : 2[1a000] -> 3[1a000] [send] via NET/IB/3 +r7i5n3:79997:80193 [3] NCCL INFO Channel 01 : 6[8a000] -> 
7[8a000] [send] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO Channel 01 : 8[8a000] -> 9[8a000] [send] via NET/IB/2 +r9i1n4:24819:25014 [1] NCCL INFO Channel 01 : 12[1c000] -> 13[1c000] [send] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO Channel 01 : 4[88000] -> 5[88000] [send] via NET/IB/2 +r7i5n3:79995:80191 [1] NCCL INFO Channel 01 : 6[1c000] -> 7[1c000] [send] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL INFO Channel 01 : 14[8a000] -> 15[8a000] [send] via NET/IB/2 +r9i1n6:58314:58551 [2] NCCL INFO Channel 01 : 14[88000] -> 15[88000] [send] via NET/IB/2 +r7i5n3:79996:80192 [2] NCCL INFO Channel 01 : 6[88000] -> 7[88000] [send] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO Channel 01 : 4[1c000] -> 5[1c000] [send] via NET/IB/3 +r7i1n3:4944:5145 [3] NCCL INFO Channel 01 : 9[8a000] -> 1[8a000] [receive] via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO Channel 01 : 4[8a000] -> 5[8a000] [send] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO Channel 01 : 12[88000] -> 13[88000] [send] via NET/IB/2 +r9i1n4:24821:25015 [3] NCCL INFO Channel 01 : 12[8a000] -> 13[8a000] [send] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 01 : 4[1a000] -> 5[1a000] [send] via NET/IB/3 +r7i5n3:79994:80190 [0] NCCL INFO Channel 01 : 6[1a000] -> 7[1a000] [send] via NET/IB/3 +r7i1n3:4942:5142 [1] NCCL INFO Channel 01 : 9[1c000] -> 1[1c000] [receive] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO Channel 01 : 9[1a000] -> 1[1a000] [receive] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 12[1a000] -> 11[1a000] [receive] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO Channel 01 : 11[1a000] -> 10[1a000] [receive] via NET/IB/2 +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 0[1a000] -> 15[1a000] [receive] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 4[88000] -> 3[88000] [receive] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 11[1a000] -> 13[1a000] [receive] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer 
+r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 5[1c000] -> 9[1c000] [receive] via NET/IB/2 +r9i1n6:58312:58552 [0] NCCL INFO Channel 01 : 15[1a000] -> 14[1a000] [receive] via NET/IB/2 +r6i4n5:37339:38051 [0] NCCL INFO comm 0x147894001060 rank 0 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r6i4n5:37339:37339 [0] NCCL INFO Launch mode Parallel +r7i7n0:55674:55900 [1] NCCL INFO Channel 01 : 8[1c000] -> 7[1c000] [send] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO Channel 01 : 12[1a000] -> 11[1a000] [send] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 5[1a000] -> 9[1a000] [receive] via NET/IB/2 +r7i2n6:894:1132 [2] NCCL INFO Channel 01 : 3[88000] -> 2[88000] [receive] via NET/IB/3 +r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 4[1c000] -> 3[1c000] [receive] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 5[88000] -> 9[88000] [receive] via NET/IB/3 +r7i7n0:55673:55899 [0] NCCL INFO Channel 01 : 8[1a000] -> 7[1a000] [send] via NET/IB/2 +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 12[1c000] -> 11[1c000] [receive] via NET/IB/2 +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 4[8a000] -> 3[8a000] [receive] via NET/IB/3 +r7i2n6:893:1131 [1] NCCL INFO Channel 01 : 3[1c000] -> 2[1c000] [receive] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55675:55901 [2] NCCL INFO Channel 01 : 8[88000] -> 7[88000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO Channel 01 : 11[1c000] -> 10[1c000] [receive] via NET/IB/2 +r9i1n4:24818:25013 [0] NCCL INFO comm 0x150e70001060 rank 12 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i2n6:895:1133 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[8a000] [receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 0[1c000] -> 15[1c000] [receive] via NET/IB/2 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 4[1a000] -> 3[1a000] [receive] via NET/IB/2 +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 5[8a000] -> 9[8a000] [receive] via NET/IB/3 
+r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 12[8a000] -> 11[8a000] [receive] via NET/IB/3 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 11[1c000] -> 13[1c000] [receive] via NET/IB/2 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 12[88000] -> 11[88000] [receive] via NET/IB/3 +r9i1n6:58313:58550 [1] NCCL INFO Channel 01 : 15[1c000] -> 14[1c000] [receive] via NET/IB/2 +r7i2n6:892:1130 [0] NCCL INFO Channel 01 : 3[1a000] -> 2[1a000] [receive] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 8[8a000] -> 7[8a000] [receive] via NET/IB/3 +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 3[88000] -> 5[88000] [receive] via NET/IB/3 +r7i7n0:55676:55902 [3] NCCL INFO Channel 01 : 8[8a000] -> 7[8a000] [send] via NET/IB/3 +r6i4n5:37340:38053 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57611:57835 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[8a000] [receive] via NET/IB/3 +r6i4n5:37340:38053 [1] NCCL INFO comm 0x153e20001060 rank 0 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n4:24819:25014 [1] NCCL INFO Channel 01 : 12[1c000] -> 11[1c000] [send] via NET/IB/2 +r7i7n2:57610:57834 [2] NCCL INFO Channel 01 : 11[88000] -> 10[88000] [receive] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO Channel 01 : 4[88000] -> 3[88000] [send] via NET/IB/3 +r6i4n5:37340:37340 [1] NCCL INFO Launch mode Parallel +r7i5n3:79997:80193 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[8a000] [receive] via NET/IB/3 +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 0[88000] -> 15[88000] [receive] via NET/IB/3 +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 0[8a000] -> 15[8a000] [receive] via NET/IB/3 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 11[1a000] -> 13[1a000] [send] via NET/IB/2 +r7i5n3:79995:80191 [1] NCCL INFO Channel 01 : 7[1c000] -> 6[1c000] [receive] via NET/IB/2 +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 8[1c000] -> 7[1c000] [receive] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer 
+r7i6n8:29154:29341 [2] NCCL INFO Channel 01 : 8[88000] -> 7[88000] [receive] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 11[88000] -> 13[88000] [receive] via NET/IB/3 +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 8[1a000] -> 7[1a000] [receive] via NET/IB/2 +r7i7n0:55676:55902 [3] NCCL INFO comm 0x150be8001060 rank 8 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n4:24819:25014 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 11[8a000] -> 13[8a000] [receive] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58315:58553 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[8a000] [receive] via NET/IB/3 +r9i1n4:24819:25014 [1] NCCL INFO comm 0x14f050001060 rank 12 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n6:58314:58551 [2] NCCL INFO Channel 01 : 15[88000] -> 14[88000] [receive] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO Channel 01 : 7[88000] -> 6[88000] [receive] via NET/IB/3 +r7i4n4:1768:2013 [2] NCCL INFO comm 0x1536d8001060 rank 4 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i5n3:79994:80190 [0] NCCL INFO Channel 01 : 7[1a000] -> 6[1a000] [receive] via NET/IB/2 +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 15[1a000] -> 13[1a000] [send] via NET/IB/2 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 3[1c000] -> 5[1c000] [receive] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 3[8a000] -> 5[8a000] [receive] via NET/IB/3 +r7i7n0:55674:55900 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24821:25015 [3] NCCL INFO Channel 01 : 12[8a000] -> 11[8a000] [send] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 3[1a000] -> 5[1a000] [receive] via NET/IB/2 +r6i4n5:37341:38054 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:25012 [2] NCCL INFO Channel 01 : 12[88000] -> 11[88000] [send] via NET/IB/3 +r6i4n5:37342:38056 [3] NCCL INFO 
2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n0:55674:55900 [1] NCCL INFO comm 0x14d9ec001060 rank 8 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r6i4n5:37341:38054 [2] NCCL INFO comm 0x149f70001060 rank 0 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r6i4n5:37342:38056 [3] NCCL INFO comm 0x1526c0001060 rank 0 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n0:55675:55901 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37341:37341 [2] NCCL INFO Launch mode Parallel +r7i7n0:55673:55899 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r6i4n5:37342:37342 [3] NCCL INFO Launch mode Parallel +r7i4n4:1767:2012 [1] NCCL INFO Channel 01 : 4[1c000] -> 3[1c000] [send] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 3[88000] -> 5[88000] [send] via NET/IB/3 +r7i7n0:55675:55901 [2] NCCL INFO comm 0x148d0c001060 rank 8 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n0:55673:55899 [0] NCCL INFO comm 0x148d98001060 rank 8 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1769:2014 [3] NCCL INFO Channel 01 : 4[8a000] -> 3[8a000] [send] via NET/IB/3 +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 13[1c000] -> 9[1c000] [receive] via NET/IB/2 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 15[1a000] -> 13[1a000] [receive] via NET/IB/2 +r7i4n4:1766:2011 [0] NCCL INFO Channel 01 : 4[1a000] -> 3[1a000] [send] via NET/IB/2 +r9i1n4:24820:25012 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24821:25015 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n4:24820:25012 [2] NCCL INFO comm 0x151c90001060 rank 12 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n4:24821:25015 [3] NCCL INFO comm 0x150d80001060 rank 12 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 13[1a000] -> 9[1a000] [receive] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 13[88000] -> 9[88000] [receive] 
via NET/IB/3 +r7i4n4:1769:2014 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 11[1c000] -> 13[1c000] [send] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 3[1c000] -> 5[1c000] [send] via NET/IB/2 +r7i4n4:1767:2012 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n4:1766:2011 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 3[8a000] -> 5[8a000] [send] via NET/IB/3 +r7i4n4:1767:2012 [1] NCCL INFO comm 0x14978c001060 rank 4 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n4:1766:2011 [0] NCCL INFO comm 0x14e224001060 rank 4 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n4:1769:2014 [3] NCCL INFO comm 0x149cc0001060 rank 4 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 13[8a000] -> 9[8a000] [receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 15[1c000] -> 13[1c000] [send] via NET/IB/2 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 3[1a000] -> 5[1a000] [send] via NET/IB/2 +r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 11[8a000] -> 13[8a000] [send] via NET/IB/3 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 15[1c000] -> 13[1c000] [receive] via NET/IB/2 +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 7[88000] -> 5[88000] [receive] via NET/IB/3 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 7[8a000] -> 5[8a000] [send] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 11[88000] -> 13[88000] [send] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 15[88000] -> 13[88000] [receive] via NET/IB/3 +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 15[88000] -> 13[88000] [send] via NET/IB/3 +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 7[1c000] -> 5[1c000] [send] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 15[8a000] -> 13[8a000] [send] via NET/IB/3 +r7i6n8:29154:29341 [2] NCCL INFO Channel 01 : 7[88000] 
-> 5[88000] [send] via NET/IB/3 +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 15[8a000] -> 13[8a000] [receive] via NET/IB/3 +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 7[1a000] -> 5[1a000] [send] via NET/IB/2 +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 9[1c000] -> 1[1c000] [send] via NET/IB/2 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 7[1c000] -> 5[1c000] [receive] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 7[8a000] -> 5[8a000] [receive] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 7[1a000] -> 5[1a000] [receive] via NET/IB/2 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 13[1a000] -> 9[1a000] [send] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 13[1a000] -> 11[1a000] [receive] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 9[1a000] -> 1[1a000] [send] via NET/IB/2 +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 9[88000] -> 1[88000] [send] via NET/IB/3 +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 13[1a000] -> 15[1a000] [receive] via NET/IB/2 +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 5[88000] -> 3[88000] [receive] via NET/IB/3 +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 9[8a000] -> 1[8a000] [send] via NET/IB/3 +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 5[88000] -> 9[88000] [send] via NET/IB/3 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 13[1c000] -> 9[1c000] [send] via NET/IB/2 +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 13[1c000] -> 11[1c000] [receive] via NET/IB/2 +r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 5[1c000] -> 3[1c000] [receive] via NET/IB/2 +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 5[8a000] -> 3[8a000] [receive] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 13[1c000] -> 15[1c000] [receive] via NET/IB/2 +r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 13[8a000] -> 11[8a000] [receive] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 13[88000] -> 9[88000] [send] via NET/IB/3 +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 13[8a000] -> 9[8a000] 
[send] via NET/IB/3 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 5[1a000] -> 3[1a000] [receive] via NET/IB/2 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 13[88000] -> 11[88000] [receive] via NET/IB/3 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 5[8a000] -> 9[8a000] [send] via NET/IB/3 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 5[1c000] -> 9[1c000] [send] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO Channel 01 : 11[1a000] -> 10[1a000] [send] via NET/IB/2 +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 13[88000] -> 15[88000] [receive] via NET/IB/3 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 5[1a000] -> 9[1a000] [send] via NET/IB/2 +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 13[8a000] -> 15[8a000] [receive] via NET/IB/3 +r7i6n8:29154:29341 [2] NCCL INFO Channel 01 : 5[88000] -> 7[88000] [receive] via NET/IB/3 +r7i1n3:4942:5142 [1] NCCL INFO Channel 01 : 1[1c000] -> 9[1c000] [send] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8760:9309 [0] NCCL INFO Channel 01 : 15[1a000] -> 14[1a000] [send] via NET/IB/2 +r7i7n2:57608:57832 [0] NCCL INFO comm 0x14facc001060 rank 10 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r8i0n3:57933:58125 [1] NCCL INFO Channel 01 : 11[1c000] -> 10[1c000] [send] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO Channel 01 : 1[1a000] -> 9[1a000] [send] via NET/IB/2 +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 5[1c000] -> 7[1c000] [receive] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 5[8a000] -> 7[8a000] [receive] via NET/IB/3 +r9i1n6:58312:58552 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 5[1a000] -> 7[1a000] [receive] via NET/IB/2 +r7i1n3:4943:5143 [2] NCCL INFO Channel 01 : 1[88000] -> 9[88000] [send] via NET/IB/3 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 9[1a000] -> 13[1a000] [receive] via NET/IB/2 +r9i1n6:58312:58552 [0] NCCL INFO comm 0x14a634001060 rank 14 nranks 16 
cudaDev 0 busId 1a000 - Init COMPLETE +r7i1n3:4944:5145 [3] NCCL INFO Channel 01 : 1[8a000] -> 9[8a000] [send] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO Channel 01 : 15[1c000] -> 14[1c000] [send] via NET/IB/2 +r8i0n3:57935:58126 [3] NCCL INFO Channel 01 : 11[8a000] -> 10[8a000] [send] via NET/IB/3 +r7i7n2:57609:57833 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n2:57609:57833 [1] NCCL INFO comm 0x14c8dc001060 rank 10 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 9[88000] -> 5[88000] [receive] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO Channel 01 : 11[88000] -> 10[88000] [send] via NET/IB/3 +r9i1n6:58313:58550 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 1[88000] -> 9[88000] [receive] via NET/IB/3 +r7i7n2:57611:57835 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 9[1c000] -> 13[1c000] [receive] via NET/IB/2 +r7i7n2:57611:57835 [3] NCCL INFO comm 0x15027c001060 rank 10 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38600:38856 [2] NCCL INFO Channel 01 : 3[88000] -> 2[88000] [send] via NET/IB/3 +r9i1n6:58313:58550 [1] NCCL INFO comm 0x1507d0001060 rank 14 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n2:57610:57834 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29341 [2] NCCL INFO Channel 01 : 7[88000] -> 6[88000] [send] via NET/IB/3 +r7i1n3:4943:5143 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8762:9310 [2] NCCL INFO Channel 01 : 15[88000] -> 14[88000] [send] via NET/IB/3 +r7i3n0:38601:38857 [3] NCCL INFO Channel 01 : 3[8a000] -> 2[8a000] [send] via NET/IB/3 +r7i7n2:57610:57834 [2] NCCL INFO comm 0x14f16c001060 rank 10 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n7:8763:9312 [3] NCCL INFO Channel 01 : 15[8a000] -> 14[8a000] [send] via NET/IB/3 
+r7i3n0:38599:38855 [1] NCCL INFO Channel 01 : 3[1c000] -> 2[1c000] [send] via NET/IB/2 +r7i3n0:38598:38854 [0] NCCL INFO Channel 01 : 3[1a000] -> 2[1a000] [send] via NET/IB/2 +r7i1n3:4943:5143 [2] NCCL INFO comm 0x152f28001060 rank 1 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:894:1132 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 9[8a000] -> 13[8a000] [receive] via NET/IB/3 +r7i2n6:894:1132 [2] NCCL INFO comm 0x152800001060 rank 2 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 9[88000] -> 13[88000] [receive] via NET/IB/3 +r7i5n3:79996:80192 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58315:58553 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n6:58314:58551 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79996:80192 [2] NCCL INFO comm 0x14f5c0001060 rank 6 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 1[8a000] -> 9[8a000] [receive] via NET/IB/3 +r9i1n6:58315:58553 [3] NCCL INFO comm 0x145168001060 rank 14 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 1[1c000] -> 9[1c000] [receive] via NET/IB/2 +r9i1n6:58314:58551 [2] NCCL INFO comm 0x14c7e0001060 rank 14 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i2n6:895:1133 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:892:1130 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:893:1131 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i2n6:893:1131 [1] NCCL INFO comm 0x153294001060 rank 2 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i2n6:892:1130 [0] NCCL INFO comm 0x1519b8001060 rank 2 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 1[1a000] -> 9[1a000] [receive] 
via NET/IB/2 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 13[1a000] -> 11[1a000] [send] via NET/IB/2 +r7i2n6:895:1133 [3] NCCL INFO comm 0x149968001060 rank 2 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i1n3:4944:5145 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4942:5142 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29340 [0] NCCL INFO Channel 01 : 7[1a000] -> 6[1a000] [send] via NET/IB/2 +r7i6n8:29155:29343 [3] NCCL INFO Channel 01 : 7[8a000] -> 6[8a000] [send] via NET/IB/3 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 9[8a000] -> 5[8a000] [receive] via NET/IB/3 +r7i1n3:4942:5142 [1] NCCL INFO comm 0x14e4a0001060 rank 1 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 9[1c000] -> 5[1c000] [receive] via NET/IB/2 +r7i1n3:4944:5145 [3] NCCL INFO comm 0x144fd4001060 rank 1 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29153:29342 [1] NCCL INFO Channel 01 : 7[1c000] -> 6[1c000] [send] via NET/IB/2 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 9[1a000] -> 5[1a000] [receive] via NET/IB/2 +r7i1n3:4941:5144 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i1n3:4941:5144 [0] NCCL INFO comm 0x1512e0001060 rank 1 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 5[88000] -> 3[88000] [send] via NET/IB/3 +r7i5n3:79997:80193 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i5n3:79994:80190 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 9[88000] -> 5[88000] [send] via NET/IB/3 +r7i5n3:79997:80193 [3] NCCL INFO comm 0x151270001060 rank 6 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i5n3:79994:80190 [0] NCCL INFO comm 0x14cd68001060 rank 6 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i5n3:79995:80191 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per 
peer +r7i5n3:79995:80191 [1] NCCL INFO comm 0x150468001060 rank 6 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 13[1c000] -> 11[1c000] [send] via NET/IB/2 +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 13[8a000] -> 11[8a000] [send] via NET/IB/3 +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 13[88000] -> 11[88000] [send] via NET/IB/3 +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 9[8a000] -> 5[8a000] [send] via NET/IB/3 +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 9[1c000] -> 5[1c000] [send] via NET/IB/2 +r9i1n5:40813:41076 [0] NCCL INFO Channel 01 : 13[1a000] -> 15[1a000] [send] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 9[1a000] -> 5[1a000] [send] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 5[8a000] -> 3[8a000] [send] via NET/IB/3 +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 5[1c000] -> 3[1c000] [send] via NET/IB/2 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 5[1a000] -> 3[1a000] [send] via NET/IB/2 +r8i0n3:57932:58124 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46160:46357 [2] NCCL INFO Channel 01 : 5[88000] -> 7[88000] [send] via NET/IB/3 +r8i0n3:57932:58124 [0] NCCL INFO comm 0x1525cc001060 rank 11 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i7n1:68406:68633 [2] NCCL INFO Channel 01 : 9[88000] -> 13[88000] [send] via NET/IB/3 +r9i1n5:40814:41075 [1] NCCL INFO Channel 01 : 13[1c000] -> 15[1c000] [send] via NET/IB/2 +r9i1n7:8760:9309 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8760:9309 [0] NCCL INFO comm 0x14da8c001060 rank 15 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38600:38856 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38600:38856 [2] NCCL INFO comm 0x1450d4001060 rank 3 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +steps: 1 loss: 12.7902 iter time (s): 150.471 samples/sec: 6.805 + iteration 1/ 1000 | elapsed time per iteration (ms): 150471.3 | 
learning rate: 1.875E-05 | lm loss: 1.279022E+01 | loss scale: 1024.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +r8i0n3:57933:58125 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57933:58125 [1] NCCL INFO comm 0x150674001060 rank 11 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i6n8:29154:29341 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29154:29341 [2] NCCL INFO comm 0x147b68001060 rank 7 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +after 1 iterations memory (MB) | allocated: 12335.4560546875 | max allocated: 19976.07177734375 | reserved: 23330.0 | max reserved: 23330.0 +time (ms) | forward: 0.00 | backward: 0.00 | optimizer: 0.00 | batch generator: 0.00 +r7i4n5:46160:46357 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46160:46357 [2] NCCL INFO comm 0x14a528001060 rank 5 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40816:41078 [3] NCCL INFO Channel 01 : 13[8a000] -> 15[8a000] [send] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40815:41077 [2] NCCL INFO Channel 01 : 13[88000] -> 15[88000] [send] via NET/IB/3 +r9i1n7:8761:9311 [1] NCCL INFO comm 0x1545ec001060 rank 15 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68406:68633 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68407:68635 [3] NCCL INFO Channel 01 : 9[8a000] -> 13[8a000] [send] via NET/IB/3 +r7i7n1:68406:68633 [2] NCCL INFO comm 0x153c7c001060 rank 9 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68405:68634 [1] NCCL INFO Channel 01 : 9[1c000] -> 13[1c000] [send] via NET/IB/2 +r7i7n1:68404:68632 [0] NCCL INFO Channel 01 : 9[1a000] -> 13[1a000] [send] via NET/IB/2 +r7i4n5:46161:46359 [3] NCCL INFO Channel 01 : 5[8a000] -> 7[8a000] [send] via NET/IB/3 +r8i0n3:57934:58127 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer 
+r8i0n3:57935:58126 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r8i0n3:57935:58126 [3] NCCL INFO comm 0x151cac001060 rank 11 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r8i0n3:57934:58127 [2] NCCL INFO comm 0x15465c001060 rank 11 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i4n5:46159:46358 [1] NCCL INFO Channel 01 : 5[1c000] -> 7[1c000] [send] via NET/IB/2 +r7i4n5:46158:46356 [0] NCCL INFO Channel 01 : 5[1a000] -> 7[1a000] [send] via NET/IB/2 +r7i3n0:38601:38857 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8763:9312 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8762:9310 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38601:38857 [3] NCCL INFO comm 0x1484b8001060 rank 3 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68407:68635 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n7:8763:9312 [3] NCCL INFO comm 0x154ba4001060 rank 15 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n7:8762:9310 [2] NCCL INFO comm 0x145ae4001060 rank 15 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r7i7n1:68407:68635 [3] NCCL INFO comm 0x14d498001060 rank 9 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r9i1n5:40814:41075 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40815:41077 [2] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40813:41076 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40816:41078 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r9i1n5:40814:41075 [1] NCCL INFO comm 0x145b14001060 rank 13 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r9i1n5:40813:41076 [0] NCCL INFO comm 0x14ffa8001060 rank 13 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r9i1n5:40815:41077 [2] NCCL INFO comm 0x150d5c001060 rank 13 nranks 16 cudaDev 2 busId 88000 - Init COMPLETE +r9i1n5:40816:41078 [3] 
NCCL INFO comm 0x1529a8001060 rank 13 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i7n1:68404:68632 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68405:68634 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29343 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i7n1:68404:68632 [0] NCCL INFO comm 0x1451ac001060 rank 9 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38598:38854 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i3n0:38599:38855 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29155:29343 [3] NCCL INFO comm 0x14bb04001060 rank 7 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i3n0:38598:38854 [0] NCCL INFO comm 0x14e978001060 rank 3 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i3n0:38599:38855 [1] NCCL INFO comm 0x14d8b4001060 rank 3 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i7n1:68405:68634 [1] NCCL INFO comm 0x14a900001060 rank 9 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46161:46359 [3] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46161:46359 [3] NCCL INFO comm 0x1541d0001060 rank 5 nranks 16 cudaDev 3 busId 8a000 - Init COMPLETE +r7i6n8:29153:29342 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29340 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29153:29342 [1] NCCL INFO comm 0x153fa0001060 rank 7 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +r7i4n5:46159:46358 [1] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i4n5:46158:46356 [0] NCCL INFO 2 coll channels, 2 p2p channels, 1 p2p channels per peer +r7i6n8:29152:29340 [0] NCCL INFO comm 0x14ff94001060 rank 7 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46158:46356 [0] NCCL INFO comm 0x14ea44001060 rank 5 nranks 16 cudaDev 0 busId 1a000 - Init COMPLETE +r7i4n5:46159:46358 
[1] NCCL INFO comm 0x14dc68001060 rank 5 nranks 16 cudaDev 1 busId 1c000 - Init COMPLETE +[2021-05-27 19:54:20,386] [INFO] [logging.py:60:log_dist] [Rank 0] step=2, skipped=0, lr=[3.75e-05, 3.75e-05], mom=[(0.9, 0.999), (0.9, 0.999)] +steps: 2 loss: 12.8004 iter time (s): 145.206 samples/sec: 7.052 + iteration 2/ 1000 | elapsed time per iteration (ms): 145207.0 | learning rate: 3.750E-05 | lm loss: 1.280042E+01 | loss scale: 1024.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) | forward: 0.00 | backward: 0.00 | optimizer: 0.00 | batch generator: 0.00 diff --git a/experiments/gpt2.md b/experiments/gpt2.md new file mode 100644 index 0000000000000000000000000000000000000000..222579dc75d71f947305351e7084d152ed6c3ddd --- /dev/null +++ b/experiments/gpt2.md @@ -0,0 +1,592 @@ +# GPT2 Experiments + +Scripts and logs of GPT2 experiments on Jean Zay HPC. + +Using 4x VT100 32GB nodes. + +(add `-C v100-32g` for 32gb nodes.) + +## Apples and Oranges + +JZ seems to give us inconsistent performance - so each allocation may give performance that can vary as much as 40%, so the numbers in the summaries of this document are very hard to compare. We thought it had to do with the proximity of the allocated nodes but it proved to vary randomly through the day, most likely highly dependening on the traffic on the JZ network. + +Therefore any results you will find in this summary are +/-40% correct. An identical test scored 40% faster or slower on the same allocation at different times of the day. + +## Megatron-LM + +Constants: + +- `TP_SIZE` = tensor parallel +- `PP_SIZE` = pipeline parallel +- `DP_SIZE` = data parallel is derived automatically from `WORLD_SIZE / (TP_SIZE * PP_SIZE)` +- `WORLD_SIZE` = total number of GPUs + +According to Megatron-LM paper the highest degree of TP we can use is 4 for 4-gpu nodes - crossing nodes would slow things down a lot. So max `TP_SIZE=4`. So the full 4 gpu node is used only for tensor parallel dimension. 
+ +## Metrics + +TFlops: `model_size_in_B * 4 * 2 * seq * global_batch_size / (time_in_sec_per_interation * total_gpus * 1e3)` + +The factor of 4 is when used with activation check-pointing, +otherwise it will be 3, but for 200B model, activation check-pointing will always be on. + +The peak of V100 32gb gpu is about 125 TFlops/sec [spec](https://images.nvidia.com/content/technologies/volta/pdf/volta-v100-datasheet-update-us-1165301-r5.pdf). But we cannot get the peak. The max achievable performance will be 30-60TFlops depending on the model size. So if you see low 20s, the model is not tuned well, if you see, over 100 then there is a bug in the calculation.  + +For v100 16gb gpus the max spec is 120 TFlops/sec. + +## Allocation + +``` +salloc --constraint=v100-32g --nodes=16 --ntasks=16 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=6:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + + +### Megatron + +The full slurm scripts and log files are at [`gpt2-meg`](./gpt2-meg): +- scripts starting with `meg_gpt2_base_` are for getting the baseline with tiny BS +- scripts starting with `meg_gpt2_perf_` are for smaller model, and tuned for high performance + +Not yet optimized with NVIDIA team! + +Metrics can be calculated in bash after figuring out the throughput (in seconds): + +``` +THROUGHPUT=122 +NNODES=16 +MSIZE=52 +MICRO_BATCH_SIZE=4 +DP_SIZE=1 +PP_CHUNKS=256 +echo "($MSIZE*4*2*1024*$MICRO_BATCH_SIZE*$DP_SIZE*$PP_CHUNKS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l +55.86675409836065573770 +``` + +**Max model size** + +These first results are all about how big of a model can be fit into the given the hardware on the smallest batch size, disregarding throughput. 
+ +16GB nodes: + +| GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops | +| ---: | ---: | -: | -: | --------: | -----: | -----: | -----: | -----: | +| 16 | 7.5B | 1 | 4 | 4 | 1 | 4 | 0.661s | 23.2 | +| 64 | 30B | 1 | 16 | 4 | 1 | 4 | 1.439s | 10.7 | +| 128 | 50B | 1 | 32 | 4 | 1 | 4 | 2.124s | 6.0 | +| 256 | 78B | 1 | 64 | 4 | 1 | 4 | 2.953s | 3.4 | +| 256 | 22B | 4 | 16 | 4 | 1 | 4 | 1.826s | 1.5 | +| | | | | | | | | | + +32GB nodes: + +| GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops | +| ---: | ---: | -: | -: | --------: | -----: | -----: | -----: | -----: | +| 16 | 18B | 1 | 4 | 4 | 1 | 4 | 1.381s | 26.7 | +| 32 | 30B | 1 | 8 | 4 | 1 | 4 | 1.618s | 19.0 | +| 64 | 65B | 1 | 16 | 4 | 1 | 4 | 2.738s | 12.2 | +| 128 | 116B | 1 | 32 | 4 | 1 | 4 | 4.234s | 7.0 | +| 256 | 206B | 1 | 64 | 4 | 1 | 4 | 6.736s | 3.9 | +| | | | | | | | | | + +The TFLops are very low because there are too few PP chunks/micro-batches (4) (gradient accumulation size / GAS) and so the bubble takes a lot of overhead, increasing PP chunks should dramatically improve performance but also need to lower the max model size to have memory to hold those chunks in. 
+ +**Performance** + +These experiments are to try a lower model size, but much higher TFlops performance + +| GPUs | Size | DP | PP | PP Chunks | Mic-BS | Glob-BS | Speed | TFlops | Notes | +| ---: | ---: | -: | -: | --------: | -----: | -----: | ----: | -----: | ----: | +| 16 | 18B | 1 | 8 | 64 | 4 | 256 | 90.5s | 26.1 | 05-26 | +| 16 | 18B | 1 | 8 | 128 | 4 | 512 | 177s | 26.7 | 05-26 | +| 16 | 18B | 1 | 8 | 256 | 4 | 1024 | 356s | 26.5 | 05-26 | +| | | | | | | | | | | +| 16 | 18B | 1 | 4 | 128 | 4 | 512 | 179s | 26.4 | 05-26 | +| 16 | 18B | 1 | 4 | 128 | 6 | 768 | 262s | 27.0 | 05-26 | +| 16 | 18B | 1 | 8 | 128 | 6 | 768 | 259s | 27.3 | 05-26 | +| 16 | 18B | 1 | 8 | 32 | 8 | 256 | 89s | 26.5 | 05-26 | +| | | | | | | | | | | +| 32 | 39B | 1 | 8 | 128 | 4 | 512 | 82s | 62.3 | 05-26 | +| 32 | 39B | 1 | 8 | 128 | 6 | 768 | 123s | 62.3 | 05-26 | +| 32 | 39B | 1 | 8 | 256 | 6 | 1536 | 241s | 63.6 | 05-26 | +| 32 | 39B | 1 | 8 | 512 | 6 | 3072 | 478s | 64.2 | 05-26 | +| | | | | | | | | | | +| 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 129s | 52.8 | 05-25 | +| 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 217s | 31.4 | 05-26 | +| 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 125s | 54.5 | 05-27 | +| 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 225s | 30.3 | 05-28 | +| | | | | | | | | | | +| 64 | 52B | 1 | 16 | 256 | 6 | 1536 | 328s | 31.2 | 05-26 | +| 64 | 52B | 1 | 16 | 256 | 8 | 2048 | 435s | 31.3 | 05-26 | +| 64 | 52B | 1 | 16 | 512 | 6 | 3072 | 650s | 31.5 | 05-26 | +| 64 | 52B | 1 | 16 | 512 | 8 | 4096 | 870s | 31.3 | 05-26 | +| 64 | 52B | 1 | 32 | 256 | 4 | 1024 | 220s | 31.0 | 05-26 | +| | | | | | | | | | | + + +data: +- Size = Model Size +- `TP=4` in all of entries +- Speed is time per iteration - to complete global batch size +- Global batch size is `micro-batch-size * pp_chunks * dp_size` +- PP chunks is the number of PP stages, so each pipeline handles `micro-batch-size * pp_chunks` +- Seq length is 1024 + +notes: +- 32gpus had a very snag fit for gpu memory for 39B model (others were in 
~75%) so it might be a bit too risky to OOM-borderline + + + + +#### Megatron + Deepspeed 3D (new branch) + + +Why: + +1. More generic pipeline API that is not hard-coded into the model +2. Better memory efficiency - needs less GPU memory, so can probably work with fewer pipeline stages +3. Works with ZeRO-Offload so can significantly reduce the GPUs required for fine-tuning once the model is pre-trained, making it accessible to a lot more folks, who don't have access to hundreds of GPUs. + +How: + + +This is new branch synced with Megatron + +DeepSpeed branch: https://github.com/ShadenSmith/DeepSpeed/tree/megatron2.4-3d +Megatron branch: https://github.com/jeffra/DSE/tree/megatron-2.4-ds-pipe + +This script can now launch Meg alone or Meg + Deepspeed 3D (ignore the zero options it doesn't work yet): +https://github.com/jeffra/DSE/blob/megatron-2.4-ds-pipe/run.sh + +``` +git clone https://github.com/ShadenSmith/DeepSpeed/ deepspeed-shaden +cd deepspeed-shaden +git checkout megatron2.4-3d +``` + +``` +git clone https://github.com/jeffra/DSE megator-jeffra +cd megator-jeffra +git checkout megatron-2.4-ds-pipe +``` + +See scripts and logs under [gpt2-meg-ds-3d](./gpt2-meg-ds-3d). + +Now we use the same code-base for training w/ and w/o DS/3D - so can use a shared results table. +Also added memory usage columns. 
+ + +| GPUs | Size | DS | GPU M | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes | +| ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: | +| 64 | 52B | Y | 26GB | 1 | 16 | 256 | 4 | 1024 | 137s | 46.7 | 06-10 | +| 64 | 52B | Y | 29GB | 1 | 16 | 256 | 4 | 1536 | 206s | 49.6 | 06-10 | +| 64 | 52B | Y | 32GB | 1 | 16 | 256 | 4 | 2048 | 270s | 50.5 | 06-10 | +| 64 | 52B | Y | 26GB | 1 | 16 | 1024 | 4 | 4096 | 544s | 50.1 | 06-10 | +| | | | | | | | | | | | | +| | | | | | | | | | | | | +| 64 | 52B | N | 32GB | 1 | 16 | 256 | 4 | 1024 | 126s | 54.1 | 06-10 | +| | | | | | | | | | | | | + + + + +``` +perl -le '$ng=64; $ms=52; $gbs=1024; $sp=146; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)' +``` + +- DS: Deepspeed/3D enabled +- GPU memory: rounded up per GPU +- MBS: Micro BS +- GBS: Global BS = GAS * MBS * DP_SIZE +- GAS: Gradient Accumulation Steps (= MBS pipe stages, = PP chunks) + +Resident CPU memory remained at about 3GB per GPU. + + +**zero_stage:1 + reduce_bucket_size** + +also added `--partition-activations` + +(`meg_ds_3d_gpt2_perf_n16_z1_try*.slurm`) + +| GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes | +| ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: | +| 64 | 52B | Y | 5e8 | 2 | 8 | 128 | 4 | 1024 | 137s | 48.8 | 07-10 | +| 64 | 52B | Y | 1e9 | 2 | 8 | 128 | 4 | 1024 | 141s | 48.3 | 07-10 | +| 64 | 52B | Y | 2e9 | 2 | 8 | 128 | 4 | 1024 | 141s | 48.3 | 07-10 | +| | | | | | | | | | | | | + +Note: Since PP*TP=8*4=32, so since there are 64GPUs - DP=2 + + +------------ +Experiment 1: +TP=4, DP=2, PP=8, gas=256, DS_ZeRO Stage 1, PA=disabled,reduce_bucket_size=2e8,5e8, mbs=2,3, + + +| ID | GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes | +| --: | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: | +| 1.1 | 64 | 52B | Y | 2e8 | 2 | 8 | 256 | 2 | 1024 | 150s | 45.4 | 07-10 | +| 1.2 | 64 | 52B | Y | 5e8 | 2 | 8 
| 256 | 2 | 1024 | 150s | 45.4 | 07-10 | +| 1.3 | 64 | 52B | Y | 2e8 | 2 | 8 | 256 | 3 | 1536 | 213 | 48.0 | 07-10 | +| 1.4 | 64 | 52B | Y | 5e8 | 2 | 8 | 256 | 3 | 1536 | 208 | 49.1 | 07-10 | +| | | | | | | | | | | | | | + + +------------ + +Experiment 2: HD=8192, NUM_LAYERs=48 (MSIZE=39) + +Megatron+DeepSpeed: +- USE_DEEPSPEED=1, MSIZE=39, TP=4, PP=8, DP=2, ZeRO Stage 1, mbs=4, PA=disabled, reduce_bucket_size=2e8, gas=128 +- USE_DEEPSPEED=1, MSIZE=39, TP=4, PP=8, DP=2, ZeRO Stage 1, mbs=4, PA=disabled, reduce_bucket_size=5e8, gas=128 + +Megatron Alone (which ever of the following runs better) +- USE_DEEPSPEED=0, MSIZE=39, TP=4, PP=16, DP=1, mbs=4, gas=256 +- USE_DEEPSPEED=0, MSIZE=39, TP=4, PP =8, DP=2, mbs=4, gas=128 + + +| ID | GPUs | Size | DS | bucket | DP | PP | GAS | MBS | GBS | Speed | TFlops | Notes | +| --: | ---: | ---: | -: | ----: | -: | -: | ---: | --: | ---: | ----: | -----: | ----: | +| 2.1 | 64 | 39B | Y | 2e8 | 2 | 8 | 128 | 4 | 1024 | 104s | 49.1 | 07-10 | +| 2.2 | 64 | 39B | Y | 5e8 | 2 | 8 | 128 | 4 | 1024 | 105s | 48.7 | 07-10 | +| 2.3 | 64 | 39B | N | na | 1 | 8 | 256 | 4 | 1024 | 109s | 46.9 | 07-10 | +| 2.4 | 64 | 39B | N | na | 2 | 8 | 128 | 4 | 1024 | 110s | 46.5 | 07-10 | +| | | | | | | | | | | | | | + + + + + + +------------ + +note: I also did tests on 1 node - getting almost identical results for Meg w/ and w/o DS/3D. So all the fluctuations are the network to blame for. + +``` +NNODES=1 +PP_SIZE=1 +TP_SIZE=4 +MICRO_BATCH_SIZE=4 +PP_CHUNKS=16 # GAS +MSIZE=4 +``` + +got an average over 22 iterations in msecs (too short for good stats) + +``` +ds 6875.05 +meg 6896.20 +``` +but it's obvious they are pretty similar. 
+ + +**save-checkpoint speed Measurement** + +| Nodes | MSize | Time (ms) | +| ----: | ----: | -------: | +| 8 | 25B | 17960.68 | +| | | | +| 16 | 52B | 19298.14 | +| 32 | 52B | 19228.38 | +| 64 | 52B | 19652.80 | +| | | | +| 32 | 97B | 19417.09 | +| 64 | 97B | 11525.99 | +| | | | +| 64 | 181B | 19495.31 | +| | | | + + +Currently it saves everything, not just model weights. + +The biggest test was for 181B model, 64 nodes, 256 gpus, and a total 2.4TB per checkpoint. + +The breakdown is: + +1. 0.34TB in PP layer states, 1.4GB per file per gpu (1.4*256) - this one looks like 2bytes per param +2. 2.00TB in optimizer states, 8.0GB per file per gpu (8*256) - this one looks like 12bytes per param + +The data sizes are correspondingly: + +1. 2 bytes per param for fp16 weights +2. 12 bytes are 8 bytes for optimizer and 4 bytes for fp32 model + +To make lots of these we should copy away only the fp16 weights, and overwrite the checkpoint - otherwise a lot more HD space will be needed. + +Important: also remember that `$six_ALL_CCFRSCRATCH` files that don't get accessed in 30 days get auto-deleted, so the important checkpoints need to be backed up (probably tar'ed and put on `$six_ALL_CCFRSTORE`). + + + +### Megatron + Deepspeed 3D (old branch) + + +**Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism` is not in sync with M-LM master - so several config args don't match. It's about 8 months old. + +See scripts and logs under [gpt2-meg-ds-3d-old](./gpt2-meg-ds-3d-old). 
+ +Uses 3D: +- TP: tensor parallelism +- PP: pipeline parallelism +- DP: data parallelism + +same features as Megatron's native, but improved by Deepspeed + +**Performance** + +| GPUs | Size | DP | PP | PP chunks | Mic-BS | Glob-BS | Speed | TFlops | Notes | +| ---: | ---: | -: | -: | --------: | -----: | ------: | ----: | -----: | ----: | +| 64 | 52B | 1 | 16 | 256 | 4 | 1024 | 146s | 46.7 | 05-27 | +| | | | | | | | | | | + + +- GAS = Gradient Accumulation size (same as PP_chunks / number of PP stages) +- Global_bs = pp_chunks*micro_bs*dp_size +- `TP_SIZE=4` (size of the node) + +``` +perl -le '$ng=64; $ms=52; $gbs=1024; $sp=146; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)' +``` + + + + + + +#### Megatron + Deepspeed ZeRO (old branch) + + +**Important**: `DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3` is not in sync with M-LM master - so several config args don't match. It's about 8 months old. + +See scripts and logs under [gpt2-meg-ds-zero](./gpt2-meg-ds-zero). + +This one uses only TP from Megatron (no PP) + +Not yet optimized with Deepspeed team! 
+ +**With Offload off** + +**Performance** +| GPUs | Size | DP | Mic-BS | Glob-BS | Speed | TFlops | Notes | +| ---: | ----: | -: | ---: | -----: | ----: | -----: | ----: | +| 64 | 52B | 16 | 48 | 768 | 122s | 41.9 | 05-25 | +| 64 | 52B | 16 | 48 | 768 | 127s | 40.3 | 05-27 | +| | | | | | | | | + + +``` +perl -le '$ng=64; $ms=52; $gbs=768; $sp=122; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)' +``` +- Seq length is 1024 +- `TP=4` in all of entries +- `DP` is number of nodes here +- Speed is time per iteration - to complete global batch size +- Global batch size is `micro-batch-size * dp-size` + +- tried w/ and w/o Tiling once but saw no difference - perhaps would be more important on larger collections + +| GPUs | Size | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | Notes | +| ---: | ---: | -: | -: | -----: | ------: | ----: | -----: | ----: | +| 64 | 52B | 4 | 16 | 48 | 768 | 127s | 40.3 | | +| 64 | 52B | 2 | 32 | 32 | 1024 | 167s | 40.8 | | +| 64 | 52B | 1 | 64 | 16 | 1024 | 184s | 37.0 | | +| 64 | 24B | 1 | 64 | 16 | 1024 | 89.0s | 35.3 | | +| 64 | 24B | 2 | 32 | 32 | 1024 | 85.7s | 36.7 | | + + +**With full cpu offload** + +| GPUs | Size | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | +| ---: | ---: | -: | -: | -----: | ------: | ----: | -----: | +| 64 | 52B | 4 | 16 | 64 | 1024 | 171s | 39.9 | +| | | | | | | | | + + +Olatunji requested the following experiments: + +- enabled/set: `--split-transformers --checkpoint-num-layers=2` +- removed: `--synchronize-each-layer --contigious-checkpointing` + +| ID | GPUs | Size | ScatEmb | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | +| -: | ---: | ---: | ------: | -: | -: | -----: | ------: | ----: | -----: | +| 1 | 64 | 52B | N | 4 | 16 | 48 | 768 | 119s | 43.0 | +| 2 | 64 | 52B | Y | 4 | 16 | 48 | 768 | 115s | 44.5 | +| 3 | 64 | 52B | Y | 4 | 16 | 52 | 832 | 124s | 44.7 | +| 4 | 64 | 52B | N | 2 | 32 | 32 | 1024 | 159s | 42.9 | +| 5 | 64 | 52B | Y | 2 | 32 | 32 | 1024 | 158s | 43.1 | +| 6 | 64 | 52B | Y | 2 | 32 | 36 | 
1152 | 176s | 43.6 | +| 7 | 64 | 52B | Y | 4 | 16 | 56 | 896 | 161s | 37.0 | +| 8 | 64 | 52B | Y | 2 | 32 | 38 | 1216 | 178s | 45.5 | +| 9 | 64 | 52B | Y | 1 | 64 | 18 | 1152 | 197s | 38.9 | +| 10 | 64 | 52B | Y | 1 | 64 | 20 | 1280 | 219s | 38.9 | +| 11 | 64 | 52B | Y | 1 | 64 | 22 | 1408 | OOM | | +| | | | | | | | | | | + + +following 2: +from ID 8: +- removed `--checkpoint-in-cpu' +- changed values + +| ID | GPUs | Size | ScatEmb | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | +| -: | ---: | ---: | ------: | -: | -: | -----: | ------: | ----: | -----: | +| 12 | 64 | 52B | Y | 4 | 16 | 24 | 384 | 72s | 35.5 | +| 13 | 64 | 52B | Y | 2 | 32 | 16 | 512 | 79s | 38.3 | +| | | | | | | | | | | + + +following 4: +from ID 12: +- removed `--split-transformers` +- changed values +- toggled `--checkpoint-in-cpu` (PA_CPU column) + +| ID | GPUs | Size | ScatEmb | PA_CPU | TP | DP | Mic-BS | Glob-BS | Speed | TFlops | +| -: | ---: | ---: | ------: | -----: | -: | -: | -----: | ------: | ----: | -----: | +| 14 | 64 | 52B | Y | N | 4 | 16 | 24 | 384 | 72s | 35.5 | +| 15 | 64 | 52B | Y | Y | 4 | 16 | 24 | 384 | 71s | 36.0 | +| 16 | 64 | 52B | Y | N | 2 | 32 | 16 | 512 | 87s | 39.2 | +| 17 | 64 | 52B | Y | Y | 2 | 32 | 16 | 512 | 88s | 38.7 | +| | | | | | | | | | | | + + + + + + +### HF + Deepspeed Zero 3 + Full Offload + +See scripts and logs under [gpt2-hf-ds](./gpt2-hf-ds). + +Not yet optimized with Deepspeed team! 
+ +**Max model size** + +| GPUs | Size | Mic-BS | Glob-BS | Speed | TFlops | +| ---: | ----: | -----: | ------: | ----: | -----: | +| 16 | 25B | 4 | 64 | 58s | 14.0 | +| 32 | 52B | 4 | 128 | 114s | 14.9 | +| 64 | 97B | 4 | 256 | 222s | 14.3 | +| | | | | | | + + +**Performance** + +| GPUs | Size | Zero | Opt Offl | Par Offl | Mic-BS | Glob-BS | Speed | TFlops | Notes | +| ---: | ----: | --: | -------: | -------: | -----: | ------: | ----: | -----: | ----: | +| 64 | 52B | 3 | N | N | 8 | 512 | 139s | 24.5 | 05-25 | +| 64 | 52B | 3 | N | N | 4 | 256 | 185s | 9.2 | 05-27 | +| 64 | 52B | 3 | N | N | 8 | 512 | 118s | 28.9 | 05-27 | +| | | | | | | | | | | +| 64 | 52B | 3 | N | N | 8 | 512 | 117s | 29.1 | 05-28 | +| 64 | 52B | 3 | N | N | 6 | 384 | 111s | 23.0 | 05-28 | +| 64 | 52B | 3 | N | N | 10 | 640 | 150s | 28.4 | 05-28 | +| 64 | 52B | 3 | Y | N | 12 | 768 | 183s | 27.9 | 05-28 | +| 64 | 52B | 3 | Y | N | 12 | 768 | 175s | 29.2 | 05-28 | +| 64 | 52B | 3 | Y | Y | 12 | 768 | 177s | 28.9 | 05-28 | +| | | | | | | | | | | +| 64 | 52B | 2 | Y | N | | | OOM | | 05-28 | +| | | | | | | | | | | + + +- DP=GPUs +- global bs = micro bs * DP +- Speed reported by HF Trainer metrics is `samples_per_second` - So total throughput in the table is `glob_bs/samples_per_second` + +notes: +- gradient checkpointing activated + + +``` +perl -le '$ng=64; $ms=52; $gbs=512; $sp=139.52; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)' +22 +``` + +ZeRO-2 with model of this size I can't fit into this setup at all - even BS=4 - it keeps getting on getting killed by cgroups - i.e. it's asking for more than 40GB general RAM per gpu. Same story w/ or w/o offload. 
+ + +## Magic scripts + +- Calculate the TFlops: + +``` +perl -le '$ng=64; $ms=52; $gbs=1024; $sp=127; print $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3)' +``` +(ng = total gpus, ms = model size in B, gbs = global batch size, sp = throughput in seconds) + +same with bash env vars and broken down GBS into mbs*dp*gas (gas=pp_chunks): +``` +echo "($MSIZE*4*2*1024*$MICRO_BATCH_SIZE*$DP_SIZE*$PP_CHUNKS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l +``` + +- Automatically process slurm/ megatron log files, average the throughput (prints 'fail' on when the training failed w/o producing a single iteration stat): +``` +find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { print "$ARGV " . ($c ? int($x/$c/1000) : "fail")}' {} \; | sort | grep -v fail +``` + + +- re-generate tflops column in the tables above: +``` +perl -ne 's#^(\| +(\d+) +\| +(\d+)B.*? +(\d+) +\| +([\d\.]+)s) +\| +[\d\.]+ +(.*?)$#"$1 | ".sprintf("%.01f", $3*4*2*1024*$4 / ($5*$2*1e3))." $6"#e && print ' gpt2.md +``` + +I originally had a mistake in model size calculation script - which has been fixed in tables and the scripts, but many logs still have the old formula - I used G `(2**30)` instead of B `(10**9)` so the model size was getting reported smaller than it is. 
+ +Now it's the correct version: +``` +NHIDDEN=4096 +NLAYERS=36 +SEQ_LEN=512 +VOCAB_SIZE=50257 +python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B')" +``` + +- misc file renames + + +``` +# rename both .sh and .out based on GAS (PP_CHUNKS) value inside +# 61B-megatron-mbs-2-pp16-dp-1.sh -> 61B-megatron-mbs-2-pp16-dp-1-gas128.sh +perl -lne 'm|PP_CHUNKS=(\d+)| && do {$gas=$1; $q = chr(39); $ARGV=~s|\.sh$||; print qq[rename.pl ${q}s|dp-(\\d)|dp-\$1-gas-$gas|$q $ARGV*] }' *sh > run-renames.sh +sh ./run-renames.sh +``` + +- A formula to match the script name to the log file, by rewriting the `job-name`: +``` +perl -pi -e '$ARGV=~s|\.sh$||; s|#SBATCH --job-name=.*|#SBATCH --job-name=$ARGV|' *slurm *sh +``` +now the log file will match the slurm file. + +- change runtime: +``` +perl -pi -e '$ARGV=~s|\.sh$||; s|#SBATCH --time=.*|#SBATCH --time=00:20:00|' *slurm *sh +``` + +- calculate speed + tflops from filename and averaging `elapsed time per iteration` from the log - including failed runs (needs the `-gas-` file rename from above) + +``` +find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { $sp=$c ? int($x/$c/1000) : 0; $d=qr/(\d+)/; $ARGV=~m|${d}B-.*?-mbs-$d-pp$d-dp-$d-gas-$d| && do {$ng=64; $ms=$1; $gbs=$2*$4*$5; $tf=$sp ? sprintf "%0.1f", $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3) : 0}; $r = $sp ? 
"$ARGV $sp $tf" : "$ARGV fail"; print $r}' {} \; | sort -nk3 -r +./61B-megatron-mbs-2-pp16-dp-1-gas-512-200977.out 144 55.5 +./55B-megatron-mbs-2-pp16-dp-1-gas-512-200968.out 134 53.8 +./55B-ds-zero0-mbs-2-pp16-dp-1-gas-512-200964.out 141 51.1 +./55B-ds-zero0-mbs-4-pp16-dp-1-gas-256-200965.out 145 49.7 +./55B-megatron-mbs-4-pp16-dp-1-gas-256-200970.out 149 48.4 +./61B-ds-zero0-mbs-4-pp16-dp-1-gas-256-200973.out 166 48.2 +./61B-ds-zero0-mbs-2-pp16-dp-1-gas-512-200972.out 169 47.3 +./61B-megatron-mbs-4-pp16-dp-1-gas-256-200979.out 172 46.5 +./61B-megatron-mbs-4-pp8-dp-2-gas-128-200980.out fail +./61B-megatron-mbs-2-pp8-dp-2-gas-256-200978.out fail +./61B-ds-zero1-mbs-4-pp8-dp-2-gas-128-200976.out fail +./61B-ds-zero1-mbs-2-pp8-dp-2-gas-256-200974.out fail +./55B-megatron-mbs-4-pp8-dp-2-gas-128-200971.out fail +./55B-megatron-mbs-2-pp8-dp-2-gas-256-200969.out fail +./55B-ds-zero1-mbs-4-pp8-dp-2-gas-128-200967.out fail +./55B-ds-zero1-mbs-2-pp8-dp-2-gas-256-200966.out fail +``` + +- same as above but with finer control over which files are processed and preserving their run order, e.g. sorted by latest run: +``` +ls -1t 61*out | xargs -n1 perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { $sp=$c ? int($x/$c/1000) : 0; $d=qr/(\d+)/; $ARGV=~m|${d}B-.*?-mbs-$d-pp$d-dp-$d-gas-$d| && do {$ng=64; $ms=$1; $gbs=$2*$4*$5; $tf=$sp ? sprintf "%0.1f", $ms*4*2*1024*$gbs / ( $sp * $ng * 1e3) : 0}; $r = $sp ? 
"$ARGV $sp $tf" : "$ARGV fail"; print $r}' +61B-ds-zero1-mbs-2-pp16-dp-1-gas-512.18488.out 196 40.8 +61B-megatron-mbs-2-pp16-dp-1-gas-512.8189.out 176 45.4 +61B-ds-zero1-mbs-2-pp16-dp-1-gas-512.17709.out 194 41.2 +``` diff --git a/experiments/lm-harness-evaluation.md b/experiments/lm-harness-evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..363bc9f3a71529afbe57203593b5d82d7e141acb --- /dev/null +++ b/experiments/lm-harness-evaluation.md @@ -0,0 +1,29 @@ +# LM Harness Evaluation + +The evaluation harness from EleutherAI is integrated as a submodule. We use a fork on [HF's Github](https://github.com/huggingface/lm-evaluation-harness). +To initialize the submodule, run: +```bash +git submodule init +git submodule update +``` + +Make sure you have the requirements in `lm-evaluation-harness`: +```bash +cd lm-evaluation-harness +pip install -r requirements.txt +``` + +To launch an evaluation, run: +```bash +python lm-evaluation-harness/main.py \ + --model gpt2 \ + --model_args pretrained=gpt2-xl \ + --tasks cola,mrpc,rte,qnli,qqp,sst,boolq,cb,copa,multirc,record,wic,wsc,coqa,drop,lambada,lambada_cloze,piqa,pubmedqa,sciq \ + --provide_description \ # Whether to provide the task description + --num_fewshot 3 \ # Number of priming pairs + --batch_size 2 \ + --output_path eval-gpt2-xl +``` + +Please note: +- As of now, only single GPU is supported in `lm-evaluation-harness`. diff --git a/experiments/performance.md b/experiments/performance.md new file mode 100644 index 0000000000000000000000000000000000000000..7360de4125d1c59a61a8612359874ee35be1ddbc --- /dev/null +++ b/experiments/performance.md @@ -0,0 +1,7 @@ +# Performance + +## Network + +The state of the network can hugely impact the performance of the training - to the tune of 40% difference in throughput. + +When making slurm allocations, use `--contiguous` to request nodes to be close to each other. 
Unless reserved ahead of time by the admins, such a constraint may add a huge delay before such requests are granted. diff --git a/experiments/tr8-104B.md b/experiments/tr8-104B.md new file mode 100644 index 0000000000000000000000000000000000000000..0d3072083454ab58f0ceeec446c47cf718d031c3 --- /dev/null +++ b/experiments/tr8-104B.md @@ -0,0 +1,103 @@ +# Train 8 104B wide tune up + +note: this tune up table is somewhat invalid since during the tune up a mistake was made in `FFN_HIDDEN_SIZE` which was incorrectly set to a much lower value, so the tests below really tested a 58B model. So the TFLOPs numbers in this section are incorrect (bigger than they are in reality), but I'm not sure how to fix it, since I don't think the formula applies when the model is lopsided. The numbers in sections afterwards are correct. + +The misconfiguration error has been fixed later in the experiments. + +``` +NLAYERS=32 +NHIDDEN=16384 +NHEADS=32 +SEQ_LEN=2048 +VOCAB_SIZE=50257 +``` + +BS=1024, SIZE=104B, + +| NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes | +| -----: | --: | --: | --: | --: | ----: | -----: | --------------------: | +| 32 | 4 | 32 | 1 | 1 | 256 | 54.5 | 31.5GB | +| 64 | 4 | 64 | 1 | 1 | 155 | 55.0 | 24GB | +| | | | | | | | | + +``` +perl -le '$ng=32*4; $sp=256; $ms=104; $gbs=1048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +perl -le '$ng=64*4; $sp=155; $ms=104; $gbs=1048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +``` + +(ng = total gpus, ms = model size in B, gbs = global batch size, sp = throughput in seconds) + +BS=2048 + + +| NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes | +| ----: | --: | --: | --: | --: | ----: | -----: | --------------------: | +| 32 | 4 | 32 | 1 | 1 | 586 | 46.52 | GB | +| 64 | 4 | 64 | 1 | 1 | 301 | 45.28 | 25GB | +| | | | | | | | | + + +``` +perl -le '$ng=32*4; $sp=586; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +perl -le '$ng=64*4; 
$sp=301; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +``` + + + +e.g. interactive tuning on 32 nodes + +``` +salloc --account=six@gpu --constraint=v100-32g --nodes=32 --ntasks=32 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=3:00:00 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + + + + +## BNB + +w/ `--use-bnb-optimizer` + +| NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes | +| ----: | --: | --: | --: | --: | ----: | -----: | --------------------: | +| 32 | 4 | 16 | 2 | 1 | 681 | 40.0 | 31GB | +| 32 | 2 | 32 | 2 | 1 | 633 | 43.0 | 31GB | +| 32 | 1 | 64 | 2 | 1 | | | 32GB OOMs | +| 32 | 4 | 32 | 1 | 1 | 688 | 39.6 | 27GB (same conf as normal 104B) | +| | | | | | | | | + +``` +perl -le '$ng=32*4; $sp=633; $ms=104; $gbs=2048; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +``` + +To ensure we are comparing apples to apples, I am trying to use the same allocations when re-testing the baseline (but I'm not sure I get the same nodes all the time). 
+ +The baseline of 104B experiment w/o `--use-bnb-optimizer` that we have been using for all experiments + +using the `main` branch: + +| NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes | +| ----: | --: | --: | --: | --: | ----: | -----: | --------------------: | +| 32 | 4 | 32 | 1 | 1 | 696 | 39.17 | 30GB (same conf as normal 104B) | +| | | | | | | | | + +using the old `big-science` branch + +| NNODES | TP | PP | DP | MBS | Speed | TFlops | Notes | +| ----: | --: | --: | --: | --: | ----: | -----: | --------------------: | +| 32 | 4 | 32 | 1 | 1 | 706 | 38.6 | 30GB (same conf as normal 104B) | +| | | | | | | | | + + + +## A100s + +GPUS_PER_NODE=8 +NNODES=16 + + +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=32 # NLAYERS must be a multiple of PP_SIZE here +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=2048 + +TFLOPs: 72.72-82 (was speeding up - so very inconclusive) diff --git a/finetune/README.md b/finetune/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36e82faac1e36073b820301218d2300a5250dd85 --- /dev/null +++ b/finetune/README.md @@ -0,0 +1,26 @@ +# Finetuning + +Notes on the plans to do finetuning with the pre-trained model + +# Large Model on smaller hardware setup + +- fine-tuning a 150-200B model with fewer GPUs than the pre-training setup + +## a. Fine-Tuning requiring only the model weights from the pre-training and uninitialized optimizer states + + +Solution: This can also be done using ZeRO-Infinity + +Hardware Requirements: This would require about 2.5-5 TB of aggregate memory for 100-200B model. It can be either CPU memory or NVMe memory, and it can be within a single node or across nodes. A single node server with enough CPU or NVMe can work, if speed is not an issue. + +Estimated Work: We can do this with ZeRO-Infinity. Seems like @Shaden Smith already has the code to load the model parameters checkpoints from Megatron+DeepSpeed 3D to Megatron+ DeepSpeed ZeRO-Infinity. + +## b. 
Continued-Training requiring both the model weights and optimizer states after pre-training + +Solution: This can be done using Megatron+DeepSpeed 3D with ZeRO CPU Offload. + +Hardware Requirements: This option will require 2-4 TB of aggregate CPU memory to store the optimizer states and 600-1200GB of aggregate GPU memory to store parameters, gradients and activations for 100-200B model. + +This reduces the number of GPUs required by 4x. Will run on 32-64 GPUs on 4-8x nodes with 8xV100, 768GB RAM. + +Estimated work: The current code already supports it. diff --git a/jz/archs/enwiki.md b/jz/archs/enwiki.md new file mode 100644 index 0000000000000000000000000000000000000000..e07d1c42be3be22436c208362cb6eadbb1854ecb --- /dev/null +++ b/jz/archs/enwiki.md @@ -0,0 +1,415 @@ + + +# GPT2 Comparisons on EnWiki + +This is a backup copy of the work-in-progress notes from when it was started using Enwiki. + +It's currently not being kept up-to-date. + +For now we moved to openwebtext so the main README.md doc is now using that.
+ +## SLURM + + +1 nodes / 4 gpus: + +``` +srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:4 --hint=nomultithread --time=60 bash +``` + + + +## Data + + + +### Enwiki + +data prep https://github.com/NVIDIA/Megatron-LM#collecting-wikipedia-training-data + +Megatron-LM's training is based on enwiki +huge dataset - but it's not needed for sample run, see short sample below +``` +wget https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2 +pip install git+https://github.com/attardi/wikiextractor +wikiextractor --json enwiki-latest-pages-articles.xml.bz2 +``` + + +short sample +``` +cd data +wget https://dumps.wikimedia.org/enwiki/20210501/enwiki-20210501-pages-articles-multistream1.xml-p1p41242.bz2 +wikiextractor --json enwiki-20210501-pages-articles-multistream1.xml-p1p41242.bz2 +mv text text-short +cd - +python tools/preprocess_data.py \ + --input data/text-short/AD/wiki_29 \ + --output-prefix my-gpt2 \ + --vocab data/gpt2-vocab.json \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file data/gpt2-merges.txt \ + --append-eod +``` + +### OpenWebText + +Using OpenWebText https://huggingface.co/datasets/openwebtext + +``` +from datasets import load_dataset +dataset = load_dataset("openwebtext", split='train') +dataset = load_dataset("stas/openwebtext-10k", split='train') +``` + +Ready datasets: + +1. HF datasets use: + + * `openwebtext` - 8M records `--dataset_name "openwebtext"` + * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"` + +2. Jsonlines (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl` + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl` + +3. 
Megatron-preprocessed datasets (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2_*` (still churning) + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_*` + + +#### How the above was done + +To convert to jsonlines for Megatron + +run on a beefy cpu instance (but firewalled), e.g.: +``` +srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:0 --hint=nomultithread --time=60 bash +``` + +small +``` +mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k +cd $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k +$six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py -10k +``` + +full (needs lots of RAM) +``` +mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext +cd $six_ALL_CCFRWORK/datasets-custom/openwebtext +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py +``` + + + +To prep for megatron 10k-sample +``` +cd $six_ALL_CCFRWORK/code/megatron-lm +python tools/preprocess_data.py \ + --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \ + --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2 \ + --vocab data/gpt2-vocab.json \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file data/gpt2-merges.txt \ + --append-eod +``` + +To prep for megatron full dataset +``` +cd $six_ALL_CCFRWORK/code/megatron-lm +python tools/preprocess_data.py \ + --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \ + --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2 \ + --vocab data/gpt2-vocab.json \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file data/gpt2-merges.txt \ + --append-eod +``` +as it should take about 11h to convert, use `gpt2/jsonl-to-meg.slurm` job to complete it + + + +## Model + + +### HF transformers model prep + + +prep HF model - it's not available on the hub + +1.
Download nvidia checkpoint: +``` +wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip +``` + +2. Convert: +``` +python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_lm_345m_v0.0.zip +``` + +3. Fetch missing files +``` +git clone https://huggingface.co/nvidia/megatron-gpt2-345m/ +``` + +4. Move the converted files into the cloned model dir +``` +mv config.json pytorch_model.bin megatron-gpt2-345m/ +``` + +5. megatron-gpt2-345m dir should now have all the files which can be passed as `--model_name_or_path megatron-gpt2-345m` + + +XXX: may be will use some small samples for testing - need .txt and .json for megatron-lm + +``` + #--train_file {data_dir}/sample_text.txt \ + #--validation_file {data_dir}/sample_text.txt \ +``` + + +## Training + +### Megatron-LM + +running native https://github.com/NVIDIA/Megatron-LM + +### finetuning on a single GPU + + +adding --finetune to work with existing checkpoint +``` +CHECKPOINT_PATH=checkpoints/megatron_lm_345m_v0.0/release +SAVE_CHECKPOINT_PATH=data/checkpoints +VOCAB_FILE=data/gpt2-vocab.json +MERGE_FILE=data/gpt2-merges.txt +DATA_PATH=my-gpt2_text_document + +# --train-samples 200 \ +# --lr-decay-samples 150 \ +# --train-iters 100000 \ +# --lr-decay-iters 320000 \ +GPT_ARGS="--num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --micro-batch-size 4 \ + --global-batch-size 8 \ + --lr 0.00015 \ + --lr-decay-style cosine \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --lr-warmup-fraction .01 \ + --finetune \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --fp16" + +OUTPUT_ARGS="--log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + --checkpoint-activations" + +python pretrain_gpt.py \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + 
--data-path $DATA_PATH +``` + + +### finetune distributed with MP + + +``` +OUTPUT_ARGS="--log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + --checkpoint-activations" + +VOCAB_FILE=data/gpt2-vocab.json +MERGE_FILE=data/gpt2-merges.txt +DATA_PATH=my-gpt2_text_document +CHECKPOINT_PATH=checkpoints/megatron_lm_345m_v0.0/release +SAVE_CHECKPOINT_PATH=data/checkpoints + +GPUS_PER_NODE=4 +NNODES=1 + +#Change for multinode config + +MASTER_ADDR=localhost +MASTER_PORT=6000 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" + +# --train-iters 100000 \ +# --lr-decay-iters 320000 \ + +python -m torch.distributed.launch \ + $DISTRIBUTED_ARGS \ + pretrain_gpt.py \ + --tensor-model-parallel-size 2 \ + --pipeline-model-parallel-size 2 \ + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --micro-batch-size 4 \ + --global-batch-size 16 \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --lr-warmup-fraction .01 \ + $OUTPUT_ARGS \ + --train-samples 5000 \ + --lr-decay-samples 4000 \ + --finetune \ + --fp16 +``` + + +### stats ### + +``` +16gb v100: +nodes=1, gpus=4 => 560 ms / iteration +nodes=1, gpus=1 => 628 ms / iteration +``` + + +### Megatron-LM+Deepspeed: w/ deepspeed Pipeline + +This is the version with Deepspeed's pipeline + +https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-3D_parallelism/examples/ds_pretrain_gpt2_pipe.sh + + + +### Megatron-LM+Deepspeed: w/ deepspeed zero3/inf + +This is the version with 
Deepspeed's Zero3/inf + +https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-ZeRO3/examples/ds_pretrain_gpt2-zero3.sh + + + +### HF transformers distributed + +Have to run once on a non-gpu instance which has network to retrieve the model and data files and get those cached. + + +``` +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +``` + +``` +MODEL=$WORK/hf/megatron-lm/checkpoints/megatron-gpt2-345m +DATASET1=" \ + --dataset_name wikitext \ + --dataset_config_name wikitext-2-raw-v1" + +DATASET=" \ + --dataset_name openwebtext" +``` + +first run on networked instance to get the dataset et, al. +``` +PYTHONPATH="src" \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 160 \ + --max_eval_samples 160 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --report_to none +``` + + +2nd run on gpu instance w/o network +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +python -m torch.distributed.launch --nproc_per_node=4 \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 160 \ + --max_eval_samples 160 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none +``` + + + +### HF transformers + Deepspeed + +probably should test zero2 and zero3 + +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +deepspeed --num_nodes 1 --num_gpus 4 \ 
+examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $WORK/hf/megatron-lm/checkpoints/megatron-gpt2-345m \ + --dataset_name wikitext \ + --dataset_config_name wikitext-2-raw-v1 \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 160 \ + --max_eval_samples 160 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none \ + --deepspeed tests/deepspeed/ds_config_zero3.json + +``` diff --git a/jz/archs/gpt2.md b/jz/archs/gpt2.md new file mode 100644 index 0000000000000000000000000000000000000000..7fab0eab1411e13e8f4d421c6fb472ed35ed6ebb --- /dev/null +++ b/jz/archs/gpt2.md @@ -0,0 +1,863 @@ +# GPT2 Comparisons + +## SLURM + + +1 nodes / 4 gpus: + +``` +srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:4 --hint=nomultithread --time=60 bash +``` + +For multi-node versions of these scripts please see `$six_ALL_CCFRWORK/code/bigscience/jz/slurm`. + + +## Data + +Using OpenWebText https://huggingface.co/datasets/openwebtext + +``` +from datasets import load_dataset +dataset = load_dataset("openwebtext", split='train') +dataset = load_dataset("stas/openwebtext-10k", split='train') +``` + +Ready datasets: + +1. HF datasets use: + + * `openwebtext` - 8M records `--dataset_name "openwebtext"` + * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"` + +2. Jsonlines (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl` + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl` + +3. 
Megatron-preprocessed datasets (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2_text_document.*` + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document.*` + + + + +#### How the above was done + +To convert to jsonlines for Megatron + +run on a beefy cpu instance (but firewalled), e.g.: +``` +srun --pty --nodes=1 --ntasks=1 --cpus-per-task=32 --gres=gpu:0 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +Get vocabs: +``` +cd $six_ALL_CCFRWORK/datasets-custom/vocabs +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json +wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt +``` + +small +``` +mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k +cd $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k +$six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py -10k +``` + +full (needs lots or RAM) +``` +mkdir -p $six_ALL_CCFRWORK/datasets-custom/openwebtext +cd $six_ALL_CCFRWORK/datasets-custom/openwebtext +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 $six_ALL_CCFRWORK/code/bigscience/data/megatron/openwebtext-to-jsonl.py +``` + +To prep a 10k-sample for megatron +``` +cd $six_ALL_CCFRWORK/code/megatron-lm +python tools/preprocess_data.py \ + --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \ + --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2 \ + --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-vocab.json \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-merges.txt \ + --append-eod \ + --workers 8 +``` + +To prep a full dataset for megatron +``` +cd $six_ALL_CCFRWORK/code/megatron-lm +python tools/preprocess_data.py \ + --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \ + --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-gpt2 \ + --vocab 
$six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-vocab.json \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file $six_ALL_CCFRWORK/datasets-custom/vocabs/gpt2-merges.txt \ + --append-eod \ + --workers 8 +``` +as it should take a few hours to convert, use `slurm/jsonl-to-meg-gpt2.slurm` job to complete it +``` +sbatch jsonl-to-meg-gpt2.slurm +``` + + +## Model + + +Ready pretrained models: GPT2 megatron_lm_345m + +1. HF + +* `$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m` + +2. Megatron + +* `$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release` + + +#### How the above was done + +**Megatron model prep** + + +1. Download nvidia checkpoint: +``` +wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip +``` +2. +``` +unzip megatron_lm_345m_v0.0.zip +``` + + +**HF transformers model prep** + + +prep HF model - it's not avaliable on the hub + +1. Download nvidia checkpoint: +``` +wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip +``` + +2. Convert: +``` +python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_lm_345m_v0.0.zip +``` + +3. Fetch missing files +``` +git clone https://huggingface.co/nvidia/megatron-gpt2-345m/ +``` + +4. Move the converted files into the cloned model dir +``` +mv config.json pytorch_model.bin megatron-gpt2-345m/ +``` + +5. 
megatron-gpt2-345m dir should now have all the files which can be passed as `--model_name_or_path megatron-gpt2-345m` + + +XXX: may be will use some small samples for testing - need .txt and .json for megatron-lm + +``` + #--train_file {data_dir}/sample_text.txt \ + #--validation_file {data_dir}/sample_text.txt \ +``` + + +## Training + +### Megatron-LM + +running native https://github.com/NVIDIA/Megatron-LM + +``` +cd $six_ALL_CCFRWORK/code +git clone https://github.com/NVIDIA/megatron-lm +cd megatron-lm +``` + + +### Megatron: finetuning on a single GPU + + +Setup: 1 node / 1 gpu +``` +srun --pty --nodes=1 --ntasks=4 --cpus-per-task=10 --gres=gpu:1 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +Launch training: + +adding `--finetune` to work with existing checkpoint, remove to train from scratch +``` +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2 + +# --train-samples 200 \ +# --lr-decay-samples 150 \ +# --train-iters 100000 \ +# --lr-decay-iters 320000 \ +GPT_ARGS=" \ + --num-layers 24 \ + --hidden-size 1024 \ + --num-attention-heads 16 \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --micro-batch-size 4 \ + --global-batch-size 8 \ + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --lr-warmup-fraction .01 \ + --finetune \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +python pretrain_gpt.py \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH +``` 
+ +Speed: 0.637s / iteration + + + +### Megatron: finetune distributed with MP + +2 types of parallelism supported: + +- `--tensor-model-parallel-size` +- `--pipeline-model-parallel-size` + +To get the average throughput have to process the logfile: + +``` +perl -nle 'use List::Util qw/sum/; m|elapsed time per iteration .ms.: ([\d\.]+)| && push @x, $1; END { print sum(@x)/+@x }' std-1611136.out +``` + +Setup: 1 node / 4 gpus +``` +srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +Launch training: +``` +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2 + +GPUS_PER_NODE=4 +NNODES=1 + +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +DISTRIBUTED_ARGS=" \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --node_rank $NODE_RANK \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +NLAYERS=24 +NHIDDEN=1024 +BATCHSIZE=4 + +# --train-iters 100000 \ +# --lr-decay-iters 320000 \ +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads 16 \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --micro-batch-size 4 \ + --global-batch-size 16 \ + --lr 0.00015 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --finetune \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --fp16 \ + --checkpoint-activations \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +python -m 
torch.distributed.launch \ + $DISTRIBUTED_ARGS \ + pretrain_gpt.py \ + --tensor-model-parallel-size 2 \ + --pipeline-model-parallel-size 2 \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl +``` + + +Speed: 0.560s / iteration + + +### Megatron: finetune distributed with MP - multi-node + + +Use `jay-z/slurm/meg-gpt2-multi-node.slurm`. + +Speed: 0.560s / iteration + + +### Megatron-LM+Deepspeed: w/ deepspeed Pipeline + +This is the version with Deepspeed's pipeline + +https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-3D_parallelism/examples/ds_pretrain_gpt2_pipe.sh + + + +Setup: 1 node / 4 gpus +``` +srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + + +``` + +cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-3D_parallelism + + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2 + +GPUS_PER_NODE=4 +NNODES=1 + +# Change for multinode config +MASTER_ADDR=localhost +MASTER_PORT=6000 +NODE_RANK=0 +WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) + +export DLWS_NUM_WORKER=${NNODES} +export DLWS_NUM_GPU_PER_WORKER=${GPUS_PER_NODE} + +config_json="./ds_config.json" + + +# Megatron Model Parallelism +mp_size=2 +# DeepSpeed Pipeline parallelism +pp_size=2 + +NLAYERS=24 +NHIDDEN=1024 +BATCHSIZE=4 +NUM_ATTN_HEADS=16 + + +LOGDIR="tensorboard_data/${NLAYERS}l_${NHIDDEN}h_${NNODES}n_${GPUS_PER_NODE}g_${pp_size}pp_${mp_size}mp_${BATCHSIZE}b_ds4" + +GAS=16 + +#ZeRO Configs +stage=0 +reduce_scatter=true +contigious_gradients=true +rbs=50000000 
+agbs=5000000000 + +#Actication Checkpointing and Contigious Memory +chkp_layers=1 +PA=true +PA_CPU=false +CC=true +SYNCHRONIZE=true +PROFILE=false + +GPT_ARGS=" \ + --model-parallel-size ${mp_size} \ + --pipe-parallel-size ${pp_size} \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NUM_ATTN_HEADS \ + --seq-length 1024 \ + --max-position-embeddings 1024 \ + --batch-size $BATCHSIZE \ + --gas $GAS \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --lr 1.5e-4 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --warmup 0.01 \ + --fp16 \ + " + #--tensorboard-dir ${LOGDIR} + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${stage} \ + --zero-reduce-bucket-size ${rbs} \ + --zero-allgather-bucket-size ${agbs} \ + " + +if [ "${contigious_gradients}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-contigious-gradients" +fi + +if [ "${reduce_scatter}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-reduce-scatter" +fi + +CHKP_ARGS=" \ +--checkpoint-activations \ +--checkpoint-num-layers ${chkp_layers}" + +if [ "${PA}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --partition-activations" +fi + +if [ "${PA_CPU}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --checkpoint-in-cpu" +fi + +if [ "${SYNCHRONIZE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --synchronize-each-layer" +fi + +if [ "${CC}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --contigious-checkpointing" +fi + +if [ "${PROFILE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --profile-backward" +fi + +full_options="${GPT_ARGS} ${OUTPUT_ARGS} 
${DEEPSPEED_ARGS} ${CHKP_ARGS}" + +run_cmd="deepspeed --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} pretrain_gpt2.py $@ ${full_options}" +echo ${run_cmd} +eval ${run_cmd} + +``` + + +### Megatron-LM+Deepspeed: w/ deepspeed zero3/inf + +This is the version with Deepspeed's Zero3/inf + +https://github.com/microsoft/DeepSpeedExamples/blob/master/Megatron-LM-v1.1.5-ZeRO3/examples/ds_pretrain_gpt2-zero3.sh + + + +Setup: 1 node / 4 gpus + +``` +srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + + +``` + +cd $six_ALL_CCFRWORK/code/DeepSpeedExamples/Megatron-LM-v1.1.5-ZeRO3 + + +# Change for multinode config +MP_SIZE=1 + +GPUS_PER_NODE=4 +NNODES=1 + +DLTS_NUM_WORKER=$NNODES +DLTS_NUM_GPU_PER_WORKER=$GPUS_PER_NODE + +NUM_WORKERS=${DLTS_NUM_WORKER} +NUM_GPUS_PER_WORKER=${DLTS_NUM_GPU_PER_WORKER} +HIDDEN_SIZE=1024 +NUM_LAYERS=24 +BATCHSIZE=4 +NUM_ATTN_HEADS=16 + +CHECKPOINT_PATH=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron_lm_345m_v0.0/release +VOCAB_FILE=$CHECKPOINT_PATH/gpt2-vocab.json +MERGE_FILE=$CHECKPOINT_PATH/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-gpt2_text_document +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/gpt2 + +config_json="./ds_zero_stage_3_config.json" + +#ZeRO Configs +stage=3 +reduce_scatter=true +contigious_gradients=true +rbs=50000000 +agbs=5000000000 + +#Activation Checkpointing and Contigious Memory +chkp_layers=1 +PA=true +PA_CPU=true +CC=true +SYNCHRONIZE=true +PROFILE=false + +# TiledLinear splits, 0 is disable +TILED_LINEAR="false" +TILE_DIM=1 + + +# Megatron Model Parallelism +LOGDIR="tboard-zero3/stage${stage}-lazyscatter-${NUM_LAYERS}l_${HIDDEN_SIZE}h_${NUM_WORKERS}n_${NUM_GPUS_PER_WORKER}g_${MP_SIZE}mp_${BATCHSIZE}b" + + +GPT_ARGS=" \ + --model-parallel-size ${MP_SIZE} \ + --num-layers $NUM_LAYERS \ + --hidden-size $HIDDEN_SIZE \ + --num-attention-heads ${NUM_ATTN_HEADS} \ + --seq-length 1024 
\ + --max-position-embeddings 1024 \ + --batch-size $BATCHSIZE \ + --train-iters 1000 \ + --lr-decay-iters 800 \ + --save $SAVE_CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + --lr 1.5e-4 \ + --lr-decay-style cosine \ + --min-lr 1.0e-5 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --warmup 0.01 \ + --fp16 \ + --scattered-embeddings \ + --split-transformers \ + " + #--tensorboard-dir ${LOGDIR} + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${stage} \ + --zero-reduce-bucket-size ${rbs} \ + --zero-allgather-bucket-size ${agbs} \ + " + +if [ "${contigious_gradients}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-contigious-gradients" +fi + +if [ "${reduce_scatter}" = "true" ]; then +DEEPSPEED_ARGS="${DEEPSPEED_ARGS} \ + --zero-reduce-scatter" +fi + +CHKP_ARGS=" \ +--checkpoint-activations \ +--deepspeed-activation-checkpointing \ +--checkpoint-num-layers ${chkp_layers}" + +if [ "${PA}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} --partition-activations" +fi + +if [ "${PA_CPU}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --checkpoint-in-cpu" +fi + +if [ "${SYNCHRONIZE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --synchronize-each-layer" +fi + +if [ "${CC}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --contigious-checkpointing" +fi + +if [ "${PROFILE}" = "true" ]; then +CHKP_ARGS="${CHKP_ARGS} \ + --profile-backward" +fi + +if [ "${TILED_LINEAR}" = "true" ]; then +tile_opt="${tile_opt} \ + --memory-centric-tiled-linear \ + --tile-factor=${TILE_DIM}" +fi + + +full_options="${GPT_ARGS} ${OUTPUT_ARGS} ${DEEPSPEED_ARGS} ${CHKP_ARGS}" + +run_cmd="deepspeed --num_nodes ${NNODES} --num_gpus ${GPUS_PER_NODE} pretrain_gpt2.py ${@:2} 
${full_options}" +echo ${run_cmd} +eval ${run_cmd} + +``` + + +### HF transformers distributed + +Have to run once on a non-gpu instance which has network to retrieve the model and data files and get those cached. + + +``` +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +``` + +``` +MODEL=$six_ALL_CCFRWORK/models-custom/megatron-gpt2/megatron-gpt2-345m +DATASET="stas/openwebtext-10k" +``` + +``` +cd $six_ALL_CCFRWORK/code/transformers +#git clone https://github.com/huggingface/transformers +#cd transformers +``` + +``` +source $six_ALL_CCFRWORK/start-prod + +``` + + +first run on networked instance to get the dataset et, al. +``` +PYTHONPATH="src" \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 160 \ + --max_eval_samples 160 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --report_to none +``` + + +2nd run on gpu instance w/o network +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +python -m torch.distributed.launch --nproc_per_node=4 \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 1000 \ + --max_eval_samples 200 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none +``` + +Speed: + +train_samples_per_second = 5.043 + + +let's do multi-node: + +Setup: 2 nodes / 4 gpus +``` +srun --pty --nodes=2 --ntasks=8 --cpus-per-task=10 --gres=gpu:4 
--hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +Launch training: + +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +python -m torch.distributed.launch --nnodes=2 --nproc_per_node=4 \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 1000 \ + --max_eval_samples 200 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none +``` + +### HF transformers + Deepspeed + zero2 + + + +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +deepspeed --num_nodes 1 --num_gpus 4 \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 1000 \ + --max_eval_samples 200 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none \ + --deepspeed tests/deepspeed/ds_config_zero2.json +``` + +Speed: + +train_samples_per_second = 2.14 + +### HF transformers + Deepspeed + zero3 + +probably should test w/o offload + +``` +PYTHONPATH="src" \ +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +deepspeed --num_nodes 1 --num_gpus 4 \ +examples/pytorch/language-modeling/run_clm.py \ + --model_name_or_path $MODEL \ + --dataset_name $DATASET \ + --output_dir output_dir \ + --overwrite_output_dir \ + --do_train \ + --do_eval \ + --max_train_samples 1000 \ + --max_eval_samples 200 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 4 \ + --num_train_epochs 1 \ + --warmup_steps 8 \ + --block_size 64 \ + --fp16 \ + --report_to none \ + --deepspeed 
tests/deepspeed/ds_config_zero3.json +``` + +Speed: + +train_samples_per_second = 0.952 + + + +### HF transformers + Deepspeed + zero2 - multi-node + + +Use `jay-z/slurm/hf-ds-gpt2-multi-node.slurm`. + +Speed: / iteration diff --git a/jz/archs/t5.md b/jz/archs/t5.md new file mode 100644 index 0000000000000000000000000000000000000000..23f3061979c915e4a565b437f56edab75e7f05ef --- /dev/null +++ b/jz/archs/t5.md @@ -0,0 +1,172 @@ +# T5 Comparisons + + + +## Data + +Using OpenWebText https://huggingface.co/datasets/openwebtext + +``` +from datasets import load_dataset +dataset = load_dataset("openwebtext", split='train') +dataset = load_dataset("stas/openwebtext-10k", split='train') +``` + + +Megatron-LM t5 uses a subword-tokenized vocab from bert. + +Ready datasets: + +1. HF datasets use: + + * `openwebtext` - 8M records `--dataset_name "openwebtext"` + * `stas/openwebtext-10k` - 10K records `--dataset_name "stas/openwebtext-10k"` + +2. Jsonlines (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl` + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl` + +3. Megatron-preprocessed datasets (derived): + + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-t5_text_document.*` + * `$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5_text_document.*` + +4. Vocabs (from HF): + + * `$six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt` + + +#### How the above was done + + +For HF datasets and Jsonlines creation details, see [gpt2.md](./gpt2.md). We only need to create the differently pre-processed datasets here. 
+
+t5 uses the same tokenizer/indexer as bert - can use it for either t5 or bert meg-lm trainings
+
+Get uncased bert vocab:
+```
+cd $six_ALL_CCFRWORK/datasets-custom/vocabs
+wget https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt -O bert-large-uncased-vocab.txt
+```
+
+
+To prep a 10k-sample for megatron
+```
+source $six_ALL_CCFRWORK/start-prod
+cd $six_ALL_CCFRWORK/code/megatron-lm
+python tools/preprocess_data.py \
+       --input $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/openwebtext-10k.jsonl \
+       --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5 \
+       --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt \
+       --dataset-impl mmap \
+       --tokenizer-type BertWordPieceLowerCase \
+       --split-sentences \
+       --workers 8
+```
+
+To prep a full dataset for megatron
+```
+source $six_ALL_CCFRWORK/start-prod
+cd $six_ALL_CCFRWORK/code/megatron-lm
+python tools/preprocess_data.py \
+       --input $six_ALL_CCFRWORK/datasets-custom/openwebtext/openwebtext.jsonl \
+       --output-prefix $six_ALL_CCFRWORK/datasets-custom/openwebtext/meg-t5 \
+       --vocab $six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt \
+       --dataset-impl mmap \
+       --tokenizer-type BertWordPieceLowerCase \
+       --split-sentences \
+       --workers 8
+
+```
+as it should take a few hours to convert, use `slurm/jsonl-to-meg-t5.slurm` job to complete it
+```
+sbatch jsonl-to-meg-t5.slurm
+```
+
+
+
+
+## Training
+
+### Megatron-LM distributed with MP
+
+Pipeline Parallelism is not yet supported for T5 (in works)
+
+Setup: 1 node / 4 gpus
+```
+srun --pty --nodes=1 --ntasks=1 --cpus-per-task=40 --gres=gpu:4 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod
+```
+
+```
+cd $six_ALL_CCFRWORK/code/megatron-lm
+
+GPUS_PER_NODE=4
+
+# Change for multinode config
+MASTER_ADDR=localhost
+MASTER_PORT=6000
+NNODES=1
+NODE_RANK=0
+WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
+
+VOCAB_FILE=$six_ALL_CCFRWORK/datasets-custom/vocabs/bert-large-uncased-vocab.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/openwebtext-10k/meg-t5_text_sentence +SAVE_CHECKPOINT_PATH=$six_ALL_CCFRWORK/checkpoints/t5 + +DISTRIBUTED_ARGS=" \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --node_rank $NODE_RANK \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +# from t5 training: +# --global-batch-size 2048 \ +GPT_ARGS=" \ + --num-layers 12 \ + --hidden-size 768 \ + --num-attention-heads 12 \ + --kv-channels 64 \ + --ffn-hidden-size 3072 \ + --encoder-seq-length 512 \ + --decoder-seq-length 128 \ + --micro-batch-size 16 \ + --max-position-embeddings 512 \ + --train-iters 1000000 \ + --lr-decay-iters 1000000 \ + --lr 0.0001 \ + --min-lr 0.00001 \ + --lr-decay-style linear \ + --lr-warmup-fraction .01 \ + --weight-decay 1e-2 \ + --clip-grad 1.0 \ + --fp16 \ + " + +OUTPUT_ARGS=" \ + --log-interval 10 \ + --save-interval 500 \ + --eval-interval 100 \ + --eval-iters 10 \ + " + +python -m torch.distributed.launch \ + $DISTRIBUTED_ARGS \ + pretrain_t5.py \ + --tensor-model-parallel-size 2 \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $SAVE_CHECKPOINT_PATH \ + --load $SAVE_CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --vocab-file $VOCAB_FILE \ + --vocab-extra-ids 100 \ + --split 949,50,1 \ + --distributed-backend nccl + + + +``` diff --git a/jz/envs/README.md b/jz/envs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1036aebd262ac8370bec0b3655f1808a9325ff33 --- /dev/null +++ b/jz/envs/README.md @@ -0,0 +1,662 @@ +# Work Environment Info + + +## Users and Accounts + +**Accounts:** + +- `six` - the BigScience allocation - our main allocation +- `ajs` - original dynamic access allocations - use it if you can as we still have resources there - but it will give low priority on scheduling - hence use primarily for jobs that can be bumped down in the queue for a few days. 
+ +To switch to `six` as the main project: +``` +idrproj -d six +``` +and logout/login. + +Check which projects one belongs to: `idrproj` + +**Users:** + +Use `idracct six` to see which username belongs to which real person. + + +## First time setup + +Make sure that your `~/.bashrc` is executed on login by creating if you don't already have `~/.bash_profile` with contents: + +``` +# if running bash +if [ -n "$BASH_VERSION" ]; then + # include .bashrc if it exists + if [ -f "$HOME/.bashrc" ]; then + . "$HOME/.bashrc" + fi +fi +``` + +It of course could have other contents, but make sure the above is there. + +Now add this to your `~/.bashrc` and run `bash` for the changes to take effect. + +``` +# ~/.bashrc: executed by bash(1) for non-login shells. +[[ $- != *i* ]] && return + +# Log in with correct group - relevant to all users as we have multiple groups we belong to +if [[ $(id -gn) != "six" ]] +then + newgrp six + exit +fi + +# start production environment: +# this loads modules, conda and sets all the relevant env vars +alias start-prod="source $six_ALL_CCFRWORK/start-prod" + +# our production conda env is here: +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda + +# SLURM / Account specific settings + +# share dirs/files with the group +umask 0007 + +# specific caches +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +# shortcut +export PROD=$six_ALL_CCFRWORK + +# handy shortcuts +alias myjobs="squeue -u `whoami`" + +# our shared conda base +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda +``` + +note: wrt `newgrp six` - if you want to use it elsewhere and not `~/.bashrc` you may use this `newgrp - six` syntax instead, but don't use it in `~/.bashrc` or it will break many things. 
+ +Also since most of our work is at `$six_ALL_CCFRWORK` you may want to add symlinks: +``` +ln -s $six_ALL_CCFRWORK ~/prod +ln -s $six_ALL_CCFRSCRATCH ~/prod-scratch +ln -s $six_ALL_CCFRSTORE ~/prod-store +ln -s /gpfsssd/worksf/projects/rech/six/commun ~/prod-worksf +``` +and then you can quickly `cd` there w/o needing to type too much, and with the shortcut `$PROD` env var you now you can do one of 2 ways: +``` +cd ~/prod +cd $PROD +``` + +Some users prefer to use the env vars, so let's try to not expect the symlinks to be there for everybody. + +If you intend to use `gsutil`, add the following lines: + +``` +if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/path.bash.inc'; fi +if [ -f '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc' ]; then . '/gpfsssd/worksf/projects/rech/six/commun/lib/google-cloud-sdk/completion.bash.inc'; fi +``` + +Without them, `gsutil` on Jean Zay fails with a hard-to-debug `TypeError: argument should be integer or bytes-like object, not 'str'` error. + +## Production environment + +In order to use the production environment, run: + +``` +start-prod +``` +which will: +- setup env vars +- configure nice git-prompt with lots of useful info built in +- load the right `module`s +- activate our custom production conda environment which has everything in it + +so basically use it when running production scripts. + +The alias should have been set in `~/.bashrc` as instructed above. 
+
+Note: the fancy [bash-git-prompt](https://github.com/magicmonty/bash-git-prompt) tells you which conda env you are in, and then which branch you are in and a ton of useful git info, and it was extended to tell you whether you're in the login instance (prefix `0-1`) or whether you're on a GPU instance where it then shows something like `4-40` - the 2 numbers stand for `${SLURM_NNODES}-${SLURM_CPUS_PER_TASK}` - so you know what `srun` configuration you're logged into (or the login shell where you get no nodes, with 0 gpus and 1 cpu hence `0-1`).
+
+The production conda env `hf-prod` is already set up, so you don't need to do anything, but here are some details on how it was done should you want to know.
+
+Our production shared conda env is at `$six_ALL_CCFRWORK/conda`, you can make it visible by either doing this one:
+```
+conda config --append envs_dirs $six_ALL_CCFRWORK/conda
+```
+which will add this path to `~/.condarc` or use:
+```
+export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
+```
+in your `~/.bashrc`.
+
+You can use it for anything but please don't install anything into it (unless coordinating with others), as we want this to be a reliable environment for all to share.
+
+Additionally you will most likely want to do:
+
+```
+mv ~/.conda ~/.conda-old
+ln -s $six_ALL_CCFRWORK/.conda ~/.conda
+```
+
+because otherwise conda will try to use your HOME dir which is only 3GB-large. You can then nuke `~/.conda-old` or move it elsewhere.
+
+
+
+
+## Creating production conda env
+
+**Do not run any of the instructions in this section**. Please co-ordinate any changes to this environment on #bigscience-jz on slack since many users use it for their experiments. If you want to create your custom conda env, please read the following sections instead.
+
+If the production environment got broken, here is how it can be re-built.
+
+This should be done on a login instance, since we need the network.
+ +``` +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda + +conda create -y -n hf-prod python=3.8 +conda activate hf-prod + +# pt-1.10.1 / cuda 11.3 +conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch +pip install deepspeed + +cd $six_ALL_CCFRWORK/code/transformers +pip install -e .[dev] + +cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed +pip install -r requirements.txt + +cd $six_ALL_CCFRWORK/code/deepspeed +./build.sh + +# to build custom tokenizers make sure that if run on JZ your `~/.cargo/config.toml` contains the following: +[net] +git-fetch-with-cli = true + +# if needed first: +# git clone https://github.com/huggingface/tokenizers $six_ALL_CCFRWORK/code/tokenizers +cd $six_ALL_CCFRWORK/code/tokenizers +git checkout bigscience_fork +module load rust +pip install setuptools_rust +pip install -e bindings/python +``` + +while we are going to override some of these with our custom installs, we first install these normally to get all the dependencies right. + +Then finally to build apex you need a non-login instance since it is very demanding on resources and such build on the login instance will get killed: + +``` +srun --pty -A six@cpu --qos=qos_cpu-dev --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +cd $six_ALL_CCFRWORK/code/apex +./build.sh +``` +Note: if using a no-gpu instance to build `apex` it will warn that it can't detect any GPUs but will cross-compile for several archs. But you could also tell it to build for V100 and A100 explicitly by simply adding the desired archs: + +``` +TORCH_CUDA_ARCH_LIST="7.0 8.0" pip install ... 
+```
+
+## Personal environment
+
+You can use these dirs, which are your private spaces:
+
+- `$WORK`
+- `$SCRATCH`
+- `$STORE`
+
+So you probably want to mimic the production env.
+
+We also agreed to use
+
+```
+ln -s $WORK ~/user
+ln -s $SCRATCH ~/user-scratch
+ln -s $STORE ~/user-store
+```
+and then you can quickly `cd` there w/o needing to type too much:
+```
+cd ~/user
+```
+
+Since we are going to use `~/user/...` in scripts, it now should be possible to re-use our scripts w/o modifying them. To change the script to use the production setup, it'll be just `s/user/prod/`.
+
+
+
+## Custom private conda env
+
+First follow the instructions for [Production environment](#production-environment) which should have already set up most things to make it very easy to add your custom conda env.
+
+If wanting to work with variations of packages, create your own conda env, e.g. env `stas`:
+
+```
+export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda
+
+conda create -y -n stas python=3.8
+conda activate stas
+conda install pytorch torchvision cudatoolkit=11.3 -c pytorch-lts -c nvidia
+pip install deepspeed
+
+cd ~/user/code/transformers
+pip install -e .[dev]
+
+cd ~/user/code/Megatron-Deepspeed
+pip install -r requirements.txt
+
+cd ~/user/code/deepspeed
+./build.sh
+
+cd ~/user/code/apex
+./build.sh
+```
+
+See a special note on how to build apex in [Creating production conda env](#creating-production-conda-env).
+ + +## Login node + +If the login node is heavily used by someone, one can switch to another node + +`host jean-zay.idris.fr` will tell you which login nodes are currently in the alias + +if the DNS round robin doesn't send you to another login node, you can target a specific login node (`jean-zayN.idris.fr` , with N from 1 to 5, though some might not be available so using the alias is always better) + + +## Dealing with running out of disc space + +Find out where disc space is used up: +``` +du -ahd1 $six_ALL_CCFRWORK | sort -rh +du -ahd1 $six_ALL_CCFRSTORE | sort -rh +``` + +Find out where inodes are used up: +``` +du -ahd1 --inodes $six_ALL_CCFRWORK | sort -rh +du -ahd1 --inodes $six_ALL_CCFRSTORE | sort -rh +``` + +Some busy git clones can be pruned of unused files with: `git gc`, e.g. to prune a dir with multiple-clones as sub-dirs: + +``` +cd $six_ALL_CCFRWORK/code +du -hs . +du -hs --inodes . +find . -mindepth 1 -maxdepth 1 -type d -exec bash -c "cd '{}' && git gc" + +du -hs . +du -hs --inodes . +``` + +## Finding things + +Our WORK is indexed by mlocate, after adding this alias: +``` +alias locate="/usr/bin/locate -d $ALL_CCFRWORK/lib/mlocate/work.db:$ALL_CCFRWORK/lib/mlocate/worksf.db" +``` +You can now do: +``` +locate -i megatron +``` +(remove `-i` if you want case-sensitive search) + +the index is being updated by `$six_ALL_CCFRWORK/bin/mlocate-update` in a crontab job in `$six_ALL_CCFRWORK/cron/cron.daily/mlocate-update.slurm`. + +For more details on the emulated crontab job see: [crontab](../crontab/README.md). + + +## Syncing the perms + +We use `umask 0007` in `~/.bashrc` to get the shared dirs have `g+rwx` perms, so that we can all operate on those, but it doesn't always help. When a tarball is extracted it will often retain the original perms on the files, so if those didn't have `w` for the group it'll remain as such. 
Therefore occasionally and especially after installing a new dataset please run: + +We also need `g+s` on dirs, so that new dirs and files created in the sub-dir get created with the same group as the parent dir (e.g. important when `scp`-ing from outside, but also in many other cases). + +Then note that `chgrp` removes the sgid bit, as it has to be restored immediately, so do not run it alone! + +For some reason group perms go wrong at times. We need all files to be `g+wrxs` (dirs), `g+rw` (files), `six` (group name), so here is how to fix things back to normal: + +``` +find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! 
\( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chgrp six {} + , -execdir chmod g+rwxs {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type f -execdir chgrp six {} + , -execdir chmod g+rw {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +``` + +If somehow we lost the sgid bit on some dirs, to restore just those: +``` +find $six_ALL_CCFRWORK -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find /gpfsssd/worksf/projects/rech/six/commun -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSCRATCH -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +find $six_ALL_CCFRSTORE -user `whoami` -type d ! \( -readable -executable \) -prune -o -type d -execdir chmod g+s {} + 2>&1 | egrep -v "(Operation not permitted|cannot operate on dangling symlink)" +``` +albeit, the set of commands above should have already done the right thing, as they include `g+rwxs`. 
+ + + +## Activate production script + +This can be safely added at the beginning of slurm scripts: + +``` +source $six_ALL_CCFRWORK/start-prod +``` + +And if you made the symlink from your `$HOME`, interactively it's easier to remember to type: + +``` +source $six_ALL_CCFRWORK/start-prod +``` + + + +## Building things from source + + +The building should happen on a beefy instance - or things just get killed + +Normally use the free `-p compil` partition: + +``` +srun --pty -A six@cpu -p compil --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +if it doesn't yield use `idrsrv` ones by adding `-c 10` (10 cpu cores) +``` +srun --pty -A six@cpu -p compil -c 10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +but if it has to be really fast, use a dedicated instance with pre-allocated cpu cores: +``` +srun --pty -A six@cpu --nodes=1 --ntasks=1 --cpus-per-task=10 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +same with 1 gpu if the build env requires one (neither `apex` nor `deepspeed` require one): +``` +srun --pty -A six@gpu --nodes=1 --ntasks=1 --cpus-per-task=10 --gres=gpu:1 --hint=nomultithread --time=60 bash --rcfile $six_ALL_CCFRWORK/start-prod +``` + +`/tmp` is tiny on gpu instances, at least apex needs a big `/tmp` folder: + + +Quick instructions (detailed listing follow): + +``` +export TMPDIR=$six_ALL_CCFRWORK/tmp +mkdir -p $TMPDIR + +cd $six_ALL_CCFRWORK/code/deepspeed +./build.sh + +cd $six_ALL_CCFRWORK/code/apex +./build.sh +``` + + +### deepspeed + + +To pre-build deepspeed (as compared to have it built via JIT at runtime): + +``` +export TMPDIR=$six_ALL_CCFRWORK/tmp +mkdir -p $TMPDIR +cd $six_ALL_CCFRWORK/code/deepspeed +./build.sh +``` + +what's in the build: +``` +$ cat build.sh +#!/bin/bash + +rm -rf build + +time TORCH_CUDA_ARCH_LIST="7.0 8.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_UTILS=1 pip install -e . 
--global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log +``` + +### apex + +To build apex (needed by megatron-lm): + +build: +``` +cd $six_ALL_CCFRWORK/code/apex +./build.sh +``` + +what's in the build: +``` +$ cat build.sh +#!/bin/bash + +pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log +``` + +Note that since we are using pt/cuda-11.1 and JZ has cuda-11.2, apex won't build unless we skip the version check (which is totally not necessary - things work just fine), so should you reset the clone and removed the local patch, you can restore it with this diff: https://github.com/NVIDIA/apex/issues/988#issuecomment-726343453 + + + +## Aliases + +``` +# autogenerate the hostfile for deepspeed +# 1. deals with: SLURM_JOB_NODELIST in either of 2 formats: +# r10i1n8,r10i2n0 +# r10i1n[7-8] +# 2. and relies on SLURM_STEP_GPUS=0,1,2... to get how many gpu slots per node +# +# usage: +# makehostfile > hostfile +function makehostfile() { +perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"}; +$slots=4 if $slots==0; # workaround 4 gpu machines +while ($ENV{"SLURM_JOB_NODELIST"} =~ m/(\w+)(?:\[([\d-,]+)\])?,?/msg) { +$b=$1; $s=$2||q[""]; $s=~s/-/../g; +print map { "$b$_ slots=$slots\n" } eval $s }' +} +``` + +``` +# auto-extract the master node's address from: SLURM_JOB_NODELIST1 which may contain r10i1n3,r10i1n[5-8],r10i1n7 +# so here we want r10i1n3 +function get_master_address() { +perl -le '$_=$ENV{"SLURM_JOB_NODELIST"}; s/,.*//; s/-.*//; s/\[//; print' +} +``` + +Better solutions for the same as above: + +``` +# autogenerate the hostfile for deepspeed +# 1. deals with: SLURM_JOB_NODELIST in either of 2 formats: +# r10i1n8,r10i2n0 +# r10i1n[7-8] +# 2. and relies on SLURM_STEP_GPUS=0,1,2... 
to get how many gpu slots per node +# +# usage: +# makehostfile > hostfile +function makehostfile() { +perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"}; +$slots=8 if $slots==0; # workaround 8 gpu machines +@nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}]; +print map { "$b$_ slots=$slots\n" } @nodes' +} +``` + +``` +# auto-extract the master node's address from: SLURM_JOB_NODELIST1 which may contain r10i1n3,r10i1n[5-8],r10i1n7 +# so here we want r10i1n3 +function get_master_address() { +echo $(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +} +``` + + +## Troubleshooting + +### pip install + +If it's trying to install into your local `~/.local` folder it's because `pip` is in that `$PATH` before +`$six_ALL_CCFRWORK/conda/hf-prod/bin/` - push the last one to be first - or best don't install any python things locally - use conda for that. Check with `which pip` - it should be under `$six_ALL_CCFRWORK/conda/hf-prod/bin/pip`. + + + +### Running `py-spy` diagnostics on multiple nodes at once + +To do some monitoring of multiple nodes running an `srun` job: + +(This is just an example of starting a job, most of the time it'll be running already: +``` +cd ~/prod/code/tr8b-104B/bigscience/train/tr11-200B-ml/ + +salloc --partition=gpu_p5 --constraint=a100 --nodes=48 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100 + +bash 200B-n40-bf16-mono.slurm +``` + +Then in another shell: + +``` +squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R" +srun --overlap --jobid=1729333 --gres=gpu:0 --nodes=48 --tasks-per-node=1 --output=trace-%N.out sh -c 'source $six_ALL_CCFRWORK/start-prod; pgrep -P $(pgrep -o python) | xargs -I {} py-spy dump --pid {}' || echo "failed" +``` + +This will create a log file per node, e.g. `trace-jean-zay-iam52.out` which will contain the output of the command on that node. + +Notes: +- adjust `--jobid` to the desired job (output of `squeue`). 
If using a job array and the job id looks like `1728318_2` first translate the virtual JobId into an actual JobID: +``` +scontrol show job 1728318_2 | perl -nle 'm/JobId=(\d+)/ && print $1' +``` +- adjust `--nodes=48` to match the same setting as the original `salloc` or `srun` command +- `--overlap` allows a new job to run on nodes allocated by another job. + +`py-spy`-specific notes: + +- run the command via `sh`. It may be possible to run `bash`, but I run into `py-spy: Permission denied` - it shouldn't need `sudo` but something in my bash dotfile triggers this problem, even though it doesn't happen if I run bash interactively. +- `pgrep -P $(pgrep -o python)` will give the immediate children of the launcher - 8 processes per node on A100 - which is what we want most of the time. +- if you want all children and grandchildren (e.g. dataloader helpers) - can be hundreds of processes! then use just `pgrep python` + + + +#### using ds_ssh + +It's a bit tricky and doesn't work for `py-spy` (see notes in the section above - it seems to do with `bash`'s dotfiles). + + +``` +salloc --partition=gpu_p5 --constraint=a100 --nodes=2 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100 +``` + +``` +bash 20B-n2-fp16.slurm +``` + +``` +function makehostfile() { +perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"}; +$slots=8 if $slots==0; # workaround 8 gpu machines +@nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}]; +print map { "$b$_ slots=$slots\n" } @nodes' +} +makehostfile > hostfile +``` + +``` +ds_ssh -f hostfile "source ~/.pdshrc; nvidia-smi" +``` + +the tricky part is to get the remote env loaded, I have a mostly ok hack, but which doesn't work for `py-spy` - something is wrong in the env. 
+ +So the special env-loading file is: +``` +$ cat ~/.pdshrc + +source /etc/profile.d/z_modules.sh; + +#source ~/.bashrc + +module purge +#module load pytorch-gpu/py3/1.8.1 +module load nvtop git git-lfs github-cli mc + +# specific caches + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +### CONDA ### + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then + . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" + else + export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +conda activate base +conda activate /gpfswork/rech/six/commun/conda/py38-pt111 +``` + +`ds_ssh` uses pdsh behind the scenes. + +Note that `py-spy` works just fine when actually ssh'ed to the compute node: + +``` +ps aux | grep python | egrep -v '(srun|grep)' | grep `whoami` | awk '{print $2}' | xargs -I {} py-spy dump --pid {} +``` + +#### using pdsh + +To access just one running node it's simpler to just use `pdsh` directly. + +``` +pdsh -w jean-zay-iam01 "source ~/.pdshrc; nvidia-smi" +``` + + +## Older info + +Probably of no use any longer, but still here in case it is needed (might move to another file). + +## Local resources + +For your own personal explorations you can either create your own `conda` envr or use your local python, which has a few of issues, but it allows you to continue using JZ's pytorch `module`. 
+ +`pip install` installs into `$HOME/.local/lib/python3.7/site-packages`, however system-wide packages may take precedence. For example to do `develop` install of transformers use this workaround: +``` +git clone https://github.com/huggingface/transformers +cd transformers +pip install --user --no-use-pep517 -e . +``` + +May still have to override `PYTHONPATH=$WORK/hf/transformers-master/src` (edit to wherever your clone is) if you want to emulate `develop` build. Test: +``` +export PYTHONPATH=$WORK/hf/transformers-master/src +python -c "import transformers; print(transformers.__version__)" +# 4.6.0.dev0 +``` + +See [`envs`](./envs) for instructions on how to build conda and packages diff --git a/jz/envs/apex/build.sh b/jz/envs/apex/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..cee436a0388ff2cbe403bf7f6e205c8b1b866938 --- /dev/null +++ b/jz/envs/apex/build.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check . 2>&1 | tee build.log + diff --git a/jz/envs/deepspeed/build.sh b/jz/envs/deepspeed/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1c2e7bba72370af73de60e0414d857bbc8725ae --- /dev/null +++ b/jz/envs/deepspeed/build.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +rm -rf build + +time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_FUSED_LAMB=1 DS_BUILD_TRANSFORMER=1 DS_BUILD_STOCHASTIC_TRANSFORMER=1 DS_BUILD_UTILS=1 pip install -e . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log + +# time TORCH_CUDA_ARCH_LIST="7.0" DS_BUILD_OPS=1 pip install -e . 
--global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | tee build.log diff --git a/math/README.md b/math/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e471d0a97e3507aa62729d8fb00b8acd63a75a98 --- /dev/null +++ b/math/README.md @@ -0,0 +1,132 @@ +# Handy Math + + +## Estimate model training time + +in days: +``` + (X billion tokens)*(8* M billion parameters)/(N_GPUs * Achieved_TFLOPs * 1e12*60*60*24) +``` + +`Achieved_TFLOPs` is measured by running experiments that tune up the setup for the best throughput performance. + +For example, for a 13 billion parameter model, trained for 300 billion tokens, on 256 GPUs at 45 TFLOPs would take: `(300 billion)*(8*13 billion)/(256*45*1 trillion *60*60*24) = ~31 days` + +``` +$ python -c 'Btokens=300; Bmodel=13; n_gpus=256; Tflops=45; \ +print(f"{Btokens*1e9*8*Bmodel*1e9/(n_gpus*Tflops*1e12*60*60*24):0.2f} days")' +31.35 days +``` + +Notes: + +- the factor of 8 can be broken into `(2 x (1+2+1))` where the factor of 2 is for multiple+add, the two ones are for forward propagation and recomputation in the backward and the 2 is for the backward propagation. + +contributed by Samyam Rajbhandari + + +## Calculate TFLOPs + +The following is an estimation formula which slightly under-reports the real TFLOPs: + +TFLOPs: `model_size_in_B * 4 * 2 * seqlen * global_batch_size / (time_in_sec_per_interation * total_gpus * 1e3)` + +The factor of 4 is when used with activation check-pointing, otherwise it will be 3, but for 100B+ model, activation check-pointing will always be on. + +So the `3*2` is often called "model FLOPs" and `4*2` - "hardware FLOPs". 
+ +``` +perl -le '$ng=64; $ms=52; $gbs=1024; $sp=127; $seqlen=2048; print $ms*4*2*$seqlen*$gbs / ( $sp * $ng * 1e3)' +``` +(ng = total gpus, ms = model size in B, gbs = global batch size, sp = throughput in seconds) + +same with bash env vars and broken down GBS into mbs*dp*gas (gas=pp_chunks): +``` +echo "($MSIZE*4*2*SEQLEN*$MICRO_BATCH_SIZE*$DP_SIZE*$GAS)/($THROUGHPUT*$NNODES*4*1000)" | bc -l +``` + +- Automatically process slurm/ megatron log files, average the throughput (prints 'fail' on when the training failed w/o producing a single iteration stat): +``` +find . -type f -name "*out" -exec perl -lne 'm|elapsed time per iteration .ms.: ([\d\.]+)| && do {$x+=$1; $c++}; END { print "$ARGV " . ($c ? int($x/$c/1000) : "fail")}' {} \; | sort | grep -v fail +``` + +The exact formula is in Equation 3 of Section 5.1 of the [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/abs/2104.04473) paper. You can see the code [here](https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/251). + +For Inference only it'd be: + +`24Bsh^2 + 4𝐵s^2h` floating point operations per layer + + +## Model sizing + +### Params as a function of the network size hyperparams + +``` +NHIDDEN=4096; NLAYERS=36; SEQ_LEN=512; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +``` + +For full details see [Calculate model size](../experiments/gpt2-utils.md). 
+ +The BLOOM architecture hasn't used the normal positional embedding, so the formula is slightly different and it no longer depends on SEQLEN, and we have added an additional layer norm after the word embedding so `s/s*h + 2*h/4*h` in the formula above: +``` +NHIDDEN=14336; NLAYERS=70; NHEADS=112; VOCAB_SIZE=250000; python -c "h=$NHIDDEN; l=$NLAYERS; n=$NHEADS; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + 4*h) / 10**9 :.0f}B, hidden/layers ratio: {int(h/l)}, hidden/heads ratio: {int(h/n)}')" +``` + +### Width-depth tradeoff + +From [The Depth-to-Width Interplay in Self-Attention](https://arxiv.org/abs/2006.12467): + +``` +NLAYERS=70; python -c "import math; l=$NLAYERS; a = 5.039; b = 5.55e-2; print(f'Optimal n_params: {12 * l * math.exp(2*a) * math.exp(2*b*l) / 10**9 :.0f}B')" +``` +This seems to be less important as the number of parameters scales up, but is useful to ground the discussion. + + +## Estimate total training time + +Training Time Estimates. Given these throughputs, we can also estimate the total amount of time needed for end-to-end training on 𝑇 tokens. Training requires 𝐼 = 𝑇 /(𝐵 · 𝑠) iterations. Using the value of 𝐹 from equation (3) and empirical end-to-end throughputs from Table 1 (denoted by 𝑋), we can estimate total training time. We note that for the configurations in Table 1, we have 6ℎ ≫ 𝑠, 16𝑙ℎ ≫ (𝑉 + 𝑠), and 12𝑙ℎ ≫ 𝑉 . Combining these observations with equations (2) and (3), we arrive at: + +End-to-end training time (seconds) ≈ 8𝑇𝑃/𝑛𝑋 + +Let us consider the GPT-3 model with 𝑃 =175 billion parameters as an example. This model was trained on 𝑇 = 300 billion tokens. On 𝑛 = 1024 A100 GPUs using batch size 1536, we achieve 𝑋 = 140 teraFLOP/s per GPU. As a result, the time required to train this model is 34 days. For the 1 trillion parameter model, we assume that 450 billion tokens are needed for end-to-end training. 
With 3072 A100 GPUs, we can achieve a per-GPU throughput of 163 teraFLOP/s, and end-to-end training time of 84 days. We believe these training times (using a reasonable number of GPUs) are practical. + + +This math and discussion is quoted from [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/abs/2104.04473). + +Let's explain the formula: `8𝑇𝑃/𝑛𝑋` + +In the formula: + +- T: number of tokens used for training in Billions +- P: number of parameters in normal numbers +- n: number of GPUs +- X: throughput per GPU in TFLOPs +- The result is in seconds, so divide by 3600*24 to get days + +Example: + +- T = 300B +- P = 200_000_000 +- X = 150 TFLOPs (more or less the best one can get on an efficient setup on A100) +- n = 350 + +gives us: + +``` +$ python -c 'print(f"{8*300*200_000_000/(350*150)/(3600*24):0.2f}", "days")' +105.82 days +``` + +## Finding the checkpoint that has the amount of tokens you want + +Trying to find the step at which you reached the number of tokens you want for every model size +n_samples = n_tokens / 2048 +The average batch size during rampup is rampup_batch_size = 0.5 * (global_batch_size + start_batch_size) (edited) +The number of steps is rampup_samples / rampup_batch_size + (n_samples - rampup_samples) / global_batch_size = rampup_samples / 0.5 / (global_batch_size + start_batch_size) + (n_tokens / 2048 - rampup_samples) / global_batch_size. Those will all change for each model. 
For example for [tr11f](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm) at 150B tokens we have: + +> - $GLOBAL_BATCH_SIZE = 512 +> - --rampup-batch-size 192 32 9_765_625 which gives: +> - start_batch_size = 192 +> - rampup_samples = 9,765,625 +> +> so n_steps = 9,765,625 / 0.5 / (512 + 192) + (150,000,000,000 / 2048 - 9,765,625) / 512 = 151721 diff --git a/train/README.md b/train/README.md new file mode 100644 index 0000000000000000000000000000000000000000..67dc573cff41f469bcf108b99b06673a44f676ad --- /dev/null +++ b/train/README.md @@ -0,0 +1,38 @@ +## Training scripts + +This folder gathers training scripts for the different arch/scaling and engineering experiments. The naming convention is `tr-`. The current baseline that architecture and scaling experiments compare to is [tr3d](https://github.com/bigscience-workshop/bigscience/blob/master/train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm). In order to launch a new experiment, you should probably start from the [arch-and-scaling template](https://github.com/bigscience-workshop/bigscience/blob/master/train/arch-and-scaling-template.slurm). + +Some tips: + - [TFlops optimization](https://github.com/bigscience-workshop/bigscience/blob/master/train/tflops_optimization.md): How to make sure that given a set of hardware you optimize the speed at which you train. + - [Instrumentation](https://github.com/bigscience-workshop/bigscience/blob/master/tools/README.md): How to sync with the hub + +## Stored checkpoints + +Location of the checkpoints of the trained models plus logs and anything else of importance - e.g. 
eval harness results: + +- tr1-13B: `gs://bigscience-backups/tr1-13B/` + +- tr3m-1B3-emb-norm-pile: `$six_ALL_CCFRSTORE/checkpoints/tr3m-1B3-emb-norm-pile` + +- tr4-1B3-rotary: `$six_ALL_CCFRSTORE/checkpoints/ +- tr4b-350M-rotary: `$six_ALL_CCFRSTORE/checkpoints/ +- tr4c-1B3-rotary-oscar: `$six_ALL_CCFRSTORE/checkpoints/tr4c-1B3-rotary-oscar` + +- tr6-1B3-prefix-lm: `$six_ALL_CCFRSTORE/checkpoints/tr6-1B3-prefix-lm` +- tr6-1B3-prefix-lm-unbiased-loss: `$six_ALL_CCFRSTORE/checkpoints/tr6-1B3-prefix-lm-unbiased-loss` +- tr6b-350M-prefix-lm: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm` +- tr6b-350M-prefix-lm-PP2: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm-PP2` +- tr6b-350M-prefix-lm-unbiased-loss: `$six_ALL_CCFRSTORE/checkpoints/tr6b-350M-prefix-lm-unbiased-loss` +- tr6c-350M-prefix-lm-reset-attention-mask: `$six_ALL_CCFRSTORE/checkpoints/tr6c-350M-prefix-lm-reset-attention-mask` +- tr6c-350M-prefix-lm-reset-attention-mask.backup: `$six_ALL_CCFRSTORE/checkpoints/tr6c-350M-prefix-lm-reset-attention-mask.backup` +- tr6d-350M-prefix-lm-pile: `$six_ALL_CCFRSTORE/checkpoints/tr6d-350M-prefix-lm-pile` +- tr6e-1B3-pile: `$six_ALL_CCFRSTORE/checkpoints/tr6e-1B3-pile` +- tr6f-1B3-oscar-no-loss-on-targets-only: `$six_ALL_CCFRSTORE/checkpoints/tr6f-1B3-oscar-no-loss-on-targets-only` +- tr6g-1B3-oscar-loss-reweighting: `$six_ALL_CCFRSTORE/checkpoints/tr6g-1B3-oscar-loss-reweighting` + +- tr7a-1B3-alibi (not a real alibi pos embedding experiment - the alibi matrix were not used in this experiment): `$six_ALL_CCFRSTORE/checkpoints/tr7a-1B3-alibi` +- tr7b-350-alibi (not a real alibi pos embedding experiment - the alibi matrix were not used in this experiment): `$six_ALL_CCFRSTORE/checkpoints/tr7b-350M-alibi` +- tr7d-1B3-alibi: `six_ALL_CCFRSTORE/checkpoints/tr7d-1B3-alibi` + +- tr9b-350M-swiglu: `six_ALL_CCFRSTORE/checkpoints/tr9b-350M-swiglu` +- tr9c-1B3-swiglu-pile: `six_ALL_CCFRSTORE/checkpoints/tr9b-1B3-swiglu-pile` diff --git a/train/memory.md b/train/memory.md 
new file mode 100644 index 0000000000000000000000000000000000000000..7a2f0d25d2c460ec73f2096a7fe9b90e80d440d3 --- /dev/null +++ b/train/memory.md @@ -0,0 +1,7 @@ +# Memory Utilization + +# Activation Partitioning + +> Activation Partitioning is a memory optimization in ZeRO that can reduce the memory consumed by activations during model parallel training (MP). In MP certain activations maybe required by all MP processes, resulting in a replication of activations across MP GPUs. Activation Partitioning stores these activations in a partitioned state once they are used for computation in the forward propagation. These activations are allgathered right before they are needed again during the backward propagation. By storing activations in a partitioned state, ZeRO in DeepSpeed can reduce the activation memory footprint proportional to the MP degree. + +To activate add `--partition-activations` diff --git a/train/sanity-checks.md b/train/sanity-checks.md new file mode 100644 index 0000000000000000000000000000000000000000..ce10181222a2db36a005e37f07fa85522c66f4c9 --- /dev/null +++ b/train/sanity-checks.md @@ -0,0 +1,59 @@ +# Sanity Checks + +When configuring the slurm script must ensure the following is strictly exact: + + +1. + +players: +- NHIDDEN +- NHEADS + +``` +NHIDDEN % NHEADS == 0 +``` + +2. + +players: +- GLOBAL_BATCH_SIZE +- MICRO_BATCH_SIZE +- DP_SIZE + +``` +GLOBAL_BATCH_SIZE % (MICRO_BATCH_SIZE * DP_SIZE) == 0 +``` + +3. + +players: +- NLAYERS +- PP_SIZE + +``` +NLAYERS % PP_SIZE == 0 +``` + +4. + + + + +5. Curriculum Learning Constraints + +- min_difficulty % 8 = 0 (to enable Tensor Core acceleration) + +- json ds config can't have numbers with '_' in them - invalid json - careful with substitutions. 
+## Restarting from existing checkpoint constraints
gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step40000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step50000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step50000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step60000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step60000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step70000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step70000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step80000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step80000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step90000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step90000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step95000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step95000 + +# in-progress + + + +# todo: + +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step100000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step100000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step110000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step110000 +gsutil rsync -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step120000 gs://bigscience-backups/tr11-176B-ml/checkpoints/global_step120000 + +``` + + + +## weights only checkpoints (0.33TB) + + +40 checkpoints: total 13TB - autogenerate the schedule: +``` +perl -le 'print qq[gsutil rsync -x "bf16.*" -r \$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step$_ gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step$_] for map { $_*3000 } 1..40' +``` + +``` +# done + +gsutil rsync -x "bf16.*" -r 
$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step3000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step3000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step6000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step6000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step9000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step9000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step12000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step12000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step15000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step15000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step18000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step18000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step21000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step21000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step24000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step24000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step27000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step27000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step33000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step33000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step36000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step36000 +gsutil rsync -x 
"bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step39000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step39000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step42000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step42000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step45000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step45000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step48000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step48000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step51000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step51000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step54000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step54000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step57000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step57000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step63000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step63000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step66000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step66000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step69000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step69000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step72000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step72000 
+gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step75000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step75000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step78000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step78000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step81000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step81000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step84000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step84000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step87000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step87000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step93000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step93000 + +# in-progress + + + +# todo: + +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step96000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step96000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step99000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step99000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step102000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step102000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step105000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step105000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step108000 
gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step108000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step111000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step111000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step114000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step114000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step117000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step117000 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step120000 gs://bigscience-backups/tr11-176B-ml/checkpoints-weights/global_step120000 + +``` + + + +## spikes + +weights only + +``` +# done + + + +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSTORE/checkpoints/tr11-176B-ml/checkpoints/spikes/global_step31200 gs://bigscience-backups/tr11-176B-ml/checkpoints-spikes-weights/global_step31200 +gsutil rsync -x "bf16.*" -r $six_ALL_CCFRSTORE/checkpoints/tr11-176B-ml/checkpoints/spikes/global_step31259 gs://bigscience-backups/tr11-176B-ml/checkpoints-spikes-weights/global_step31259 + +# in-progress + + + +# todo: + +``` + + +## Tarring the checkpoints in STORE + +Since we don't have too many inodes in STORE we ought to tar the checkpoints + +``` +cd /gpfsdsstore/projects/rech/six/commun/checkpoints/tr11-176B-ml/checkpoints +cd 1 +find * -maxdepth 0 -type d -exec tar cvf {}.tar {} \; + +``` diff --git a/train/tr11-176B-ml/chronicles-prequel.md b/train/tr11-176B-ml/chronicles-prequel.md new file mode 100644 index 0000000000000000000000000000000000000000..f92c5d1e3e44fbc83af4fe6c9423e2cc734bdf38 --- /dev/null +++ b/train/tr11-176B-ml/chronicles-prequel.md @@ -0,0 +1,1394 @@ +# Prequel + +Trials and tribulations prior to the start of training. 
+ +For the trials and tribulation during the training see: [chronicles](chronicles.md). + +# A100 experiments + +200B + +torch.optim.Adam: + +16 nodes: +- 1st node: 61GB +- all nodes: 47GB +- performance: XXX + +apex.optimizers.FusedAdam + +16 nodes: +- 1st node: 51GB +- all nodes: 44GB +- performance: XXX + + + +## Size + + +Here are some existing models around the same size with NLAYERS / NHIDDEN and their ratio: + + +| origin | size | layers | hidden | ratio | +| ------ | --- | -----: | -----: | ----: | +| bs | 104B | 64 | 11600 | 180 | +| meg-lm | 145B | 80 | 12288 | 154 | +| openai | 175B | 96 | 12288 | 128 | +| meg-lm | 310B | 96 | 16384 | 170 | +| msft | 530B | 105 | 20480 | 195 | +| | | | | | + + + + +Possible ideas: + +- 205B: 112 / 12288 (ratio: 109) narrow +- 206B: 96 / 13312 (ratio: 139) closer to typical 150-200 ratio + +Formula to get model size, used 150k dict roughly - need to update: +``` +NHIDDEN=12288; NLAYERS=112; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +``` + +### 104B topology / memory usage + +Looking at the current 104B topology to try to estimate the 200B model, though many things are different. + +``` +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +SEQ_LEN=2048 +VOCAB_SIZE=50257 +``` + +32 GB gpus. 
+ +TP=4, PP=32 + +breakdown: + +104B: + +- embedding size: `v*h`: `50257*11600` = 582_981_200 / 4 (TP=4) => 145_745_300 params per gpu for embedding +- one layer size: `12*h**2 + 13*h`: 1_614_870_800 / 4 (TP=4) => 403_717_700 params per gpu per layer + +64 layers over PP=32 => 2 layers per gpu + +Total params per gpu: +- gpu w/ emb: `2*403_717_700 + 145_745_300` = 953_180_700 params * 18 bytes = 17_157_252_600 bytes (17GB) +- gpu w/o emb: `2*403_717_700` = 807_435_400 params * 18 bytes = 14_533_837_200 (15GB) + +plus activations memory + +Checking the actual GPU allocations (nvidia-smi) - also need to take into account the cuda kernels (1271MiB) + +- 22GB (w/ embed) (4GB activations memory) +- 18GB (w/o embed) (2GB activations memory) + +## Hardware + +384 A100s 80GB / 8 gpus per node + +We can plan to use 384 gpus out of 416 as 4 nodes of 8 gpus need to remain reserved for when some nodes happen to be down. + +Initially we will have only 144 gpus and then around mid-Feb we should have the rest. + +## Possible config: + +So a possible config is + +- a single replica needs to fit 96 gpus and then we can do DP=4 to a full 384 gpus + +- extrapolating from the current 104B setup we can have: TP=4/PP=24 @ 80GB + 150K vocab size (which is different from the 50k vocab in 104B - 3x bigger embed matrix plus bigger hidden size. + +- most likely the embedding layer now will need to be partitioned together with the transformer blocks to do a good balancing of resources. e.g. in the current 1.3B ml setup, the 1st and last gpus use all of DRAM, but the rest of gpus use only 1/2 DRAM - and TLOPs are ~21 which is very underutilized. + + +### Possible topologies for 200B + +206B: + +``` +NLAYERS=96 +NHIDDEN=13312 +NHEADS=XXX +SEQ_LEN=2048 +VOCAB_SIZE=150_000 +``` + +Overall we know that DP is the fastest, then PP, then TP - but for PP to be efficient we need a big bs. 
+ +The following math is trying various topologies to fit into 80GB gpus + + +* TP=4, PP=24 + +- embedding size: `v*h: 150257*13312` = `2_000_221_184 / 4` (TP=4) => 500_055_296 params per gpu for embedding +- one layer size: `12*h**2 + 13*h`: `2_126_685_184 / 4` (TP=4) => 531_671_296 params per gpu per layer + +In other words 2B params per layer w/o TP, or 38GB (`2.12*18`) per layer. + +So here we definitely need to balance embedding layer with transformer layers as they are of the same size, so overall 2+layers blocks to balance - and the constraint won't be Layers % PP = 0 but Layers+2 % PP = 0 + +So probably should do 94 layers? + +94+2 layers over PP=24 => 4 layers per gpu + +Total params per gpu (considering emb layer on par with transformers block): +- `4*531_671_296` = `2_126_685_184 params * 18` = 38_280_333_312 bytes +plus activations memory + +40GB A100 takes 1573MiB for cuda kernels (probably about the same for 80GB? may be a bit larger) +`python -c "import torch; import time; torch.ones(1).cuda(); time.sleep(30)"` + check `nvidia-smi` output. + + + +* TP=1, PP=96 + +~2B params per layer w/o TP, or 38GB (`2.12*18`) per layer. + +but DS breaks if there isn't at least one transformer block per gpu :( +otherwise could do a very efficient: + +``` +1 | 2 | 3 ... | 95 | 96 +emb | transf | transf ....| transf | emb +``` + +So in this scenario no TP is needed, which should make the assembly much faster. But will require DS fixing their side. or perhaps we could somehow hack on a dummy layer which will be like transformers? e.g. if it's the first or last layer it'd be an identity forward. + +Also the pipeline will be super long here, which to make efficient will require a huge global batch size. + + + +* with TP=2, PP=48 + +1_063_342_592 params per layer, 19_140_166_656 bytes (19GB) per layer + +perhaps could squeeze 3 layers per gpu - but of course each gpu will be less efficient since it will have to do 3 pipe stages. 
+ +* Other considerations + +Of course, we could make the model wider and shallower so for example with TP=1 perhaps we could fit a bit more width and use less layers. e.g. 530B model was NLAYERS=105, NHIDDEN=20480 - so it's much wider. + + + +## Reconsiderations + +After discussing the above plans with the NVIDIA and DeepSpeed experts it appears that: + +1. on A100 and especially with much larger models TP>1 is much more beneficial and typically NVIDIA almost always uses TP=gpus_per_node for large models. + +2. A very deep PP (96) would be very difficult to keep efficient unless the batch size per replica is huge. + +3. Too many layers isn't great either: + +Jared Casper writes: + +> Regarding hidden size vs transformer layer (width vs depth), some feedback I got is that there isn't really a magic formula/process. We increase depth with the width but not as drastically as a typical vision model scaling. So you shouldn't go too crazy with depth. The width is somewhat constrained by sizes good for the GPU, so it seems a strategy is to push out the width but keep it nice numbers, then fill out with depth. You'll notice even at 530B params we only went to 105 layers. + + +## Existing models + +Let's first analyse a few existing models and see how they fit 80GB A100 8-gpu nodes. 
+ + +* 145B meg-lm + +``` +NHIDDEN=12288; NLAYERS=80; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +Model size: 146B, ratio=153 +``` + +``` +NHIDDEN=12288; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}GB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}GB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}GB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}GB')" +emb size: 617.56M/11116.04GB, per gpu 77.19M/1389.51GB +blk size: 1812.10M/32617.78GB, per gpu 226.51M/4077.22GB +``` + +MP=64: TP=8, PP=8: one replica 64 gpus + +so 80/8=10 PP stages per gpu: `10*4` =40GB of weights/optim states/grads per gpu + + +* 310B meg-lm + +``` +NHIDDEN=16384; NLAYERS=96; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +Model size: 310B, ratio=170 +``` + +MP=128: TP=8, PP=16: one replica 128 gpus + +``` +NHIDDEN=16384; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}GB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}GB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}GB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}GB')" +emb size: 823.41M/14821.39GB, per gpu 102.93M/1852.67GB +blk size: 3221.44M/57985.89GB, per gpu 402.68M/7248.24GB +``` + +so `96/16=6` PP stages per gpu: `6*7.3` ~44GB of weights/optim states/grads per gpu + +* 530B msft + + +``` +NHIDDEN=20480; NLAYERS=105; SEQ_LEN=2048; VOCAB_SIZE=50257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +Model size: 310B, ratio=170 +``` + + +MP=280: TP=8, PP=35: one replica 280 gpus + 
+(actually don't know the vocab size here, but it doesn't matter much) + +``` +NHIDDEN=20480; VOCAB_SIZE=50257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}GB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}GB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}GB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}GB')" +emb size: 1029.26M/18526.74GB, per gpu 128.66M/2315.84GB +blk size: 5033.43M/90601.76GB, per gpu 629.18M/11325.22GB +``` + +so 105/35=3 PP stages per gpu: `6*7.3` = ~33.9GB of weights/optim states/grads per gpu + + +To summarize we can see the setup is so that about half the gpu is loaded with weights / optim states / grad `*18`) + +## Possible 200B models + + +So first let's try to come up with wider and shallower model to fit 200B, or wide if shallow doesn't work out too well topology/efficiency-wise + + +### 199B: 80 x 14336 (layers x hidden) + +``` +NHIDDEN=14336; NLAYERS=80; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +Model size: 199B, ratio=179 +``` + +which gives us: + +``` +NHIDDEN=14336; VOCAB_SIZE=150257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}GB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}GB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}GB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}GB')" +emb size: 2154.08M/38773.52GB, per gpu 269.26M/4846.69GB +blk size: 2466.44M/44395.87GB, per gpu 308.30M/5549.48GB +``` + +TP=8, PP=10 - 80 gpus for one replica, can fit DP=4 (320/384) + +so with PP=10, we get 80/10 = 8 stages per gpu = 44GB for normal layer gpus and 50GB for the 1st/last gpus due to 5G embedding, the remaining 28GB for activations (2GB is cuda kernels) - could be enough, but not sure. 
+ +If we are tight, consider giving the embedding its own layer so the total layers will be NLAYERS+2. In which case we need to change NLAYERS to be -2 than the wanted number to be able to spread out the layers evenly across gpus. + +Also consider that the more tightly we pack each gpu the more PP stages it'll have - the slower it'll run. + +And less GPUs means less processing power - so overall it's likely to be slower. + +### 206B: 96 x 13312 (layers x hidden) + +``` +NHIDDEN=13312; NLAYERS=96; SEQ_LEN=2048; VOCAB_SIZE=150257; python -c "h=$NHIDDEN; l=$NLAYERS; s=$SEQ_LEN; v=$VOCAB_SIZE; print(f'Model size: {(l*(12*h**2 + 13*h) + v*h + s*h + 2*h) / 10**9 :.0f}B, ratio={int(h/l)}')" +Model size: 206B, ratio=138 +``` + +``` +NHIDDEN=13312; VOCAB_SIZE=150257; TP=8; python -c "h=$NHIDDEN; v=$VOCAB_SIZE; tp=$TP; emb=v*h/10**6; blk=(12*h**2+13*h)/10**6; print(f'emb size: {emb:.2f}M/{emb*18:.2f}GB, per gpu {emb/tp:.2f}M/{emb*18/tp:.2f}GB'); print(f'blk size: {blk:.2f}M/{blk*18:.2f}GB, per gpu {blk/tp:.2f}M/{blk*18/tp:.2f}GB')" +emb size: 2000.22M/36003.98GB, per gpu 250.03M/4500.50GB +blk size: 2126.69M/38280.33GB, per gpu 265.84M/4785.04GB +``` + +TP=8, PP=12 => 96 gpus for one replica, can fit DP=4 (384/384) + +96/12 = 8 stages per gpu = ~40GB per gpu, same number of PP stages per gpu and more spare memory + +This might be a better fit memory-wise if the one above is too close to being full, especially on gpu 0 and -1. + +It also uses the full 384 gpu allocation in a snag way. + + + +## Train time estimation + +So A100 spec is 312 TFLOPS for BF16, so probably the best would be 50% of that so 150 TFLOPs (which we probably won't reach, but let's see), so yes I agree 150 is a bit too optimistic, but let's use it as the best case scenario. + + +Also we still don't know how many gpus we will end up using, but let's say we use them all - 350. Once we decide on the topology we will be able to replace 350 with the actual number of gpus we plan to use. 
+ +``` +$ python -c 'print(f"{8*300*200_000_000/(350*150)/(3600*24):0.2f}", "days")' +105.82 days +``` + +so 3.5 months in the best case scenario. But more likely 150-200 days since it'll be less of everything plus potential issues. We will know more once we get access to 1 replica as then we should get a much better TFLOPs estimation, which will then be less for DP>1. + +And this estimate is w/o encountering any problems, which is unlikely, so add more overhead for rollbacks and restarts. + +Additionally this number is too optimistic since we won't have the full number of GPUs till about some time in end of February. + +See [Estimate total training time](../../math#estimate-total-training-time) for details of the math. + +XXX: actually are we training for 300B or 400B tokens because of Multi-Lingual? in which case it'll be 1/3 longer! + + +## Allocated hours sufficiency check + +We currently have about 3M gpu hours left in our allocation. + +Let's see how many total gpus hours the good estimation is: + + +``` +python -c 'print(f"{8*300*200_000_000/150/3600:0.2f}", "compute hours")' +888888.89 compute hours +``` +So if it takes 2x longer than the best case scenario, then we say need about 2M hours, so we are fine there. + +Important nuance: + +We will have an exclusive access only till May, and in May we will have to share with others. + +So at the moment we will have only about 3 months of having access to all gpus. 
+ + + +## Best TFLOPs + +To measure best TFLOPs possible use a single, so that it uses all the intra-node connections (NVLink) and doesn't touch the network: + +### fp16 + +- 1 node, 1 replica + +20B model: TP=8, PP=1, NLAYERS=8, NHIDDEN=14400, NHEADS=32, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048 + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 769.99 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.384045E+01 | loss scale: 4096.0 | grad norm: 15906.210 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.660 | TFLOPs: 108.47 | +``` + +- 10 nodes, 1 replica + +200B model: TP=8, PP=10, NLAYERS=80, NHIDDEN=14400, NHEADS=96, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048 + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 844.81 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.373861E+01 | loss scale: 4096.0 | grad norm: 34132.119 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.424 | TFLOPs: 98.87 | +``` + +- 20 nodes, 2 replicas + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 430.21 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.373876E+01 | loss scale: 4096.0 | grad norm: 34027.311 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 4.761 | TFLOPs: 97.07 | +``` + +It was puzzling why much less memory was used for identical set up with DP=2 over DP=1 - but it's because of ZeRO-1 that saves a lot of memory across all GPUs! 
+ + +| GPUs | Size | DP | TP | PP | MBS | Mem | TFLOPs | Notes | +| ---: | ---: | -: | -: | -: | --: | ---: | -----: | ----: | +| 8 | 20B | 1 | 8 | 1 | 1 | 67GB | 108.47 | 02-17 | +| 80 | 200B | 1 | 8 | 10 | 1 | 73GB | 98.87 | 02-17 | +| 160 | 200B | 2 | 8 | 10 | 1 | 51GB | 97.07 | 02-17 | +| | | | | | | | | | + +*Mem = max memory used by the first (last) nodes with the word embedding matrix - max is 77GB + + +### bf16 + +- 1 node, 1 replica + +20B model: TP=8, PP=1, NLAYERS=8, NHIDDEN=14400, NHEADS=32, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048 + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 777.09 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.381926E+01 | loss scale: 1.0 | grad norm: 2.763 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.635 | TFLOPs: 107.48 | +``` + + +- 10 nodes, 1 replica + +200B model: TP=8, PP=10, NLAYERS=80, NHIDDEN=14400, NHEADS=96, SEQ_LEN=2048, VOCAB_LENGTH=250k, GBS=2048 + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 853.81 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.369443E+01 | loss scale: 1.0 | grad norm: 4.461 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 2.399 | TFLOPs: 97.82 | +``` + + +- 20 nodes, 2 replicas + + +``` + iteration 2/ 95367 | consumed samples: 4096 | consumed tokens: 8388608 | elapsed time per iteration (s): 434.14 | learning rate: 3.787E-06 | global batch size: 2048 | lm loss: 6.369444E+01 | loss scale: 1.0 | grad norm: 6.314 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | samples per second: 4.717 | TFLOPs: 96.19 | +``` + + +| GPUs | Size | DP | TP | PP | MBS | Mem | TFLOPs | Notes | +| ---: | ---: | -: | -: | -: | --: | ---: | -----: | ----: | +| 8 | 20B | 1 | 8 | 1 | 1 | 68GB | 107.48 | 02-17 | 
+| 80 | 200B | 1 | 8 | 10 | 1 | 75GB | 97.82 | 02-17 | +| 160 | 200B | 2 | 8 | 10 | 1 | 53GB | 96.19 | 02-17 | +| | | | | | | | | | + +*Mem = max memory used by the first (last) nodes with the word embedding matrix - max is 77GB + +So we can load more stages as we get higher DP as ZeRO spreads out over more gpus - smaller shards. + + + +## dealing with JZ hanging on the large model + +This overcomes the hanging which in general should lead to a slower throughput since all CUDA operations become synchronous and would block until they are done. + +``` +export CUDA_LAUNCH_BLOCKING=1 +``` + +200B, measuring 2nd iter: + +| GPUs | async | GBS | TFLOPs | Notes | +| ---: | ----: | ---: | -----: | -----------: | +| 80 | no | 512 | 91.04 | | +| 80 | yes | 512 | 97.20 | | +| 160 | no | 512 | 84.59 | | +| 160 | yes | 512 | 84.44 | | +| 160 | no | 2048 | 90.29 | | +| 160 | yes | 2048 | 90.25 | may hang | +| 320 | no | 2048 | 87.78 | | +| 320 | yes | 2048 | xxxx | always hangs | +| | | | | | + +async/yes == `CUDA_LAUNCH_BLOCKING=0` + +Interesting. Sometimes `CUDA_LAUNCH_BLOCKING=1` impacts the speed, at other times it doesn't. Perhaps with larger set ups it's barely impacting since there is a lot more comms than the small setup. + + +## Choosing the fastest 3D Topology + +Benchmarking the fastest 3D topology. Constraint: can use at most 48 nodes of 8 gpu a100 80gb nodes. + +Note that we want not the highest TFLOPs but the highest speed per iteration, since one can get high TFLOPs on less GPUs and overall slower speed, since we only care about how fast we can finish the training. + +Also note that the model size isn't always the same as the number of layers had to be tweaked to fit PP and NHIDDEN was fixed - so speed/tflops can't be exactly compared - but can be brought back to the same size by tweaking NHIDDEN. also since for efficiency of finishing this process I take the snapshot of a single iteration (always 2nd) the data isn't exact and can fluctuate a bit. 
But the point of this exercise is to get a feel of which topology is superior. + + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 200B | 12 | 8 | 4 | 1 | 2040 | 47GB | 189.06 | 91.67 | 02-20 | +| 45 | 200B | 9 | 8 | 5 | 1 | 2043 | 44GB | 208.40 | 88.84 | 02-20 | +| 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 | +| 42 | 191B | 6 | 8 | 7 | 1 | 2046 | 39GB | 202.99 | 94.20 | 02-20 | +| 48 | 200B | 6 | 8 | 8 | 1 | 2046 | 36GB | 185.75 | 93.59 | 02-20 | +| 45 | 205B | 5 | 8 | 9 | 1 | 2045 | 37GB | 199.14 | 94.23 | 02-20 | +| 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 | +| 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 92.67 | 02-20 | +| 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 | +| | | | | | | | | | | | + +* Sec/it throughput at iteration 2 + +As you can see the 80GB is totally unnecessary for MBS=1 as we are bound by compute of each gpu and we barely use half the gpu memory and trying to pack more on each gpu slows the ensemble down. This is of course thanks to ZeRO which shards all fp32 optim+grad+params over all gpus - so the more gpus you use the less memory is needed to accomodate the same model size, regardless of DP/TP/PP topology. (with MBS=1 that is so that the activations don't take too much memory) + +This table doesn't take into account batch size rampup which needs to be divisible by DP as it progressed from 32, 64, ... so really we have an additional constraint of `DP % 4 = 0` and `GBS % 32 = 0`. 
+ +which means from the above list only a few configs are suitable, and these are: + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 | +| 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 | +| 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 92.67 | 02-20 | +| 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 | +| | | | | | | | | | | | + +Increasing MBS will speed up things a bit and we have a ton of spare memory to accommodate a larger MBS, but have to ensure we get the batch size ramp up sorted out. So if the rampup steps are in increments of 32 with DP=4 highest MBS is 8. and `log2(MBS) % 2 = 0`. + + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 194B | 8 | 8 | 6 | 1 | 2048 | 39GB | 183.64 | 92.38 | 02-20 | +| 48 | 194B | 8 | 8 | 6 | 2 | 2048 | 45GB | 172.36 | 98.43 | 02-20 | +| 48 | 194B | 8 | 8 | 6 | 4 | 2048 | 56GB | 173.92 | 97.55 | 02-20 | +| 48 | 194B | 8 | 8 | 6 | 8 | 2048 | 75GB | 192.42 | 88.17 | 02-20 | +| | | | | | | | | | | | + + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ---------------------: | +| 40 | 200B | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 | +| 40 | 200B | 4 | 8 | 10 | 2 | 2048 | 43GB | 207.92 | 100.43 | 02-20 | +| 40 | 200B | 4 | 8 | 10 | 4 | 2048 | 55GB | 208.18 | 100.30 | 02-20 | +| 40 | 200B | 4 | 8 | 10 | 8 | 2048 | 76GB | 229.69 | 90.91 | 02-20 too close to OOM | +| | | | | | | | | | | | + + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 44 | 195B | 4 | 8 | 11 | 1 | 2048 | 32GB | 197.15 | 
92.67 | 02-20 | +| 44 | 195B | 4 | 8 | 11 | 2 | 2048 | 41GB | 186.65 | 97.89 | 02-20 | +| 44 | 195B | 4 | 8 | 11 | 4 | 2048 | 53GB | 185.79 | 98.34 | 02-20 | +| 44 | 195B | 4 | 8 | 11 | 8 | 2048 | 75GB | 206.62 | 88.42 | 02-20 | +| | | | | | | | | | | | + + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 183B | 4 | 8 | 12 | 1 | 2048 | 30GB | 172.40 | 90.84 | 02-20 | +| 48 | 183B | 4 | 8 | 12 | 2 | 2048 | 39GB | 161.96 | 96.69 | 02-20 | +| 48 | 183B | 4 | 8 | 12 | 4 | 2048 | 50GB | 163.32 | 95.89 | 02-20 | +| | | | | | | | | | | | + +The models are slightly different in size so can't compare absolute numbers. + +But clearly MBS=2 is about the best, MBS=4 is close by. + +If we utilize all 48 nodes then we have PP6 and PP12 as contenders. + + +## tile and wave quantization + + +A100 80GB has 108 SMs + +https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#tile-quant + +``` +nhidden % 128 = 0 +``` + +https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#wave-quant + +``` +nhidden % 108 = 0 +``` + +TP=8: + +``` +nhidden % 8 = 0 +``` + +Combining all 3: + +``` +nhidden = 108*8*c = 864*c +``` + +which gives 864*16 = 13824 (187B) => so let's try to compare with 14400 (200B) + +XXX: This is a total guestimate - need proper math + +| Nodes | Size | NHIDDEN | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 40 | 200B | 14400 | 4 | 8 | 10 | 1 | 2048 | 35GB | 221.21 | 94.39 | 02-20 | +| 40 | 187B | 13824 | 4 | 8 | 10 | 1 | 2048 | 33GB | 160.29 | 120.05 | 02-20 | +| 40 | 187B | 13824 | 4 | 8 | 10 | 2 | 2048 | 39GB | 151.07 | 127.38 | 02-20 | +| 40 | 187B | 13824 | 4 | 8 | 10 | 4 | 2048 | 53GB | 147.43 | 130.53 | 02-20 | +| 40 | 187B | 13824 | 4 | 8 | 10 | 8 | 2048 | 73GB | 152.51 | 
126.18 | 02-20 | +| | | | | | | | | | | | | + + +## TFLOPs calculation improved + +Until now we used an estimated TFLOPs calculator which was under-reporting the real TFLOPs. And we couldn't compare those to the TFLOPs reported by [Megatron-LM](https://github.com/NVIDIA/Megatron-LM#readme). + +Deepak Narayanan fixed this here: https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/251 + +So from here on all the TLOPs reports will be about 3% higher - so can't exactly compare to the earlier numbers in this document. + + +## 48 node contenders + +So we have 2 set ups that fit well into 48 nodes - and that's PP=6/DP=8 or PP=12/DP=4 + +NHIDDEN=14336 / NLAYERS=72 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 181B | 4 | 8 | 12 | 1 | 2048 | 29GB | 143.31 | 112.49 | 02-21 | +| 48 | 181B | 4 | 8 | 12 | 2 | 2048 | 37GB | 134.02 | 120.29 | 02-21 | +| 48 | 181B | 4 | 8 | 12 | 4 | 2048 | 49GB | 123.69 | 130.34 | 02-21 | +| 48 | 181B | 4 | 8 | 12 | 8 | 2048 | 69GB | 129.26 | 124.72 | 02-21 | +| | | | | | | | | | | | + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 181B | 8 | 8 | 6 | 1 | 2048 | 38GB | 139.82 | 115.31 | 02-21 | +| 48 | 181B | 8 | 8 | 6 | 2 | 2048 | 44GB | 131.02 | 123.05 | 02-21 | +| 48 | 181B | 8 | 8 | 6 | 4 | 2048 | 56GB | 121.48 | 132.71 | 02-21 | +| | | | | | | | | | | | + + +So it's either: + +* DP=4, PP=12, MBS=4: 123 secs/it | 130 TFLOPS +* DP=8, PP=06, MBS=4: 121 secs/it | 133 TFLOPS + +Let's compare again with another setup: + +NHIDDEN=13824 / NLAYERS=84 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 196B | 4 | 8 | 12 | 2 | 2048 | 39GB | 143.89 | 121.45 | 02-21 | +| 48 | 196B | 4 | 8 | 12 | 4 | 2048 | 
52GB | 133.12 | 131.27 | 02-21 | +| 48 | 196B | 8 | 8 | 6 | 2 | 2048 | 65GB | 141.41 | 123.58 | 02-21 | +| 48 | 196B | 8 | 8 | 6 | 4 | 2048 | 56GB | 130.31 | 134.11 | 02-21 | +| | | | | | | | | | | | + +This one has 15% more layers than the previous tables, so here the less-PP-stages setup wins, that is: + +* DP=8, PP=06, MBS=4: 130.31 secs/it | 134.11 TFLOPS + +The following has so far given the highest TFLOPs, as we are packing more into less GPUs so 64 gpus are left out, and of course the total speed for iteration is much slower. So the key is the iteration speed and not TFLOPs. + +NHIDDEN=13824 / NLAYERS=80 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 40 | 187B | 8 | 8 | 10 | 4 | 2048 | GB | 147.04 | 135.92 | 02-21 | +| | | | | | | | | | | | + + +Max possible TFLOPs check for `NHIDDEN=14336`: + +NHIDDEN=14336 / NLAYERS=6 / GBS=512 + +| Nodes | Size | Layers | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -----: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 1 | 18B | 6 | 8 | 1 | 2 | 2048 | 54GB | 130.43 | 143.48 | 02-21 | +| 1 | 18B | 6 | 8 | 1 | 2 | 2048 | 54GB | 119.19 | 157.02 | 02-21 | +| 1 | 18B | 10 | 8 | 1 | 1 | 2048 | 80GB | 205.52 | 142.59 | 02-21 | +| | | | | | | | | | | | + +Trying with ZeRO_STAGE=0/1 + +NHIDDEN=14336 / NLAYERS=72 + +| Nodes | Size | ZS | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 181B | 1 | 4 | 8 | 12 | 2 | 2048 | 37GB | 120.29 | 134.02 | 02-21 | +| 48 | 181B | 0 | 4 | 8 | 12 | 2 | 2048 | 72GB | 137.34 | 113.02 | 02-21 | +| | | | | | | | | | | | | + +* ZS = ZERO_STAGE + +XXX: currently can't test `ZeRO_STAGE=0` on master, or `ZeRO_STAGE=1` on the special branch - so need to retest the above on the same branch. 
+ + +## Final round comparison + +all NHEADS=64 (above too) + +NHIDDEN=12288 / NLAYERS=96 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 177B | 8 | 8 | 6 | 2 | 2048 | GB | 136.73 | 115.73 | 02-23 | +| 48 | 177B | 8 | 8 | 6 | 4 | 2048 | GB | 122.96 | 128.69 | 02-23 | +| | | | | | | | | | | | +| | | | | | | | | | | | + +NHIDDEN=13312 / NLAYERS=84 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 4 | 8 | 12 | 4 | 2048 | GB | 125.52 | 129.29 | 02-23 | +| 48 | 182B | 8 | 8 | 6 | 2 | 2048 | GB | 135.55 | 119.72 | 02-23 | +| 48 | 182B | 8 | 8 | 6 | 4 | 2048 | GB | 122.93 | 132.00 | 02-23 | +| | | | | | | | | | | | + +NHIDDEN=13824 / NLAYERS=78 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 8 | 6 | 4 | 2048 | GB | 121.28 | 133.93 | 02-23 | +| | | | | | | | | | | | + +NHIDDEN=14336 / NLAYERS=72 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | --: | ---: | -----: | -----: | ----: | +| 48 | 181B | 4 | 8 | 12 | 4 | 2048 | GB | 123.79 | 130.24 | 02-23 | +| 48 | 181B | 8 | 8 | 6 | 4 | 2048 | GB | 120.85 | 133.40 | 02-23 | +| | | | | | | | | | | | + + +## NHEADs comparison + +NHIDDEN=14336 / NLAYERS=72 + +not many variations around 100 as `14336 = 2**11*7` and the constraint is `(HEADS/TP)*MBS % 4 = 0` or for `MBS=4, TP=8` `HEADS % 16 = 0` + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 181B | 8 | 8 | 6 | 4 | 16 | 2048 | 54GB | 121.03 | 133.20 | 02-24 | +| 48 | 181B | 8 | 8 | 6 | 4 | 32 | 2048 | 55GB | 124.01 | 130.00 
| 02-23 | +| 48 | 181B | 8 | 8 | 6 | 4 | 64 | 2048 | 55GB | 120.18 | 134.15 | 02-23 | +| 48 | 181B | 8 | 8 | 6 | 4 | 112 | 2048 | 53GB | 138.72 | 116.21 | 02-23 | +| 48 | 181B | 8 | 8 | 6 | 4 | 128 | 2048 | 55GB | 124.89 | 129.08 | 02-23 | +| 48 | 181B | 8 | 8 | 6 | 4 | 256 | 2048 | 54GB | 132.85 | 121.35 | 02-24 | +| | | | | | | | | | | | | + +NHIDDEN=13824 / NLAYERS=78 + +here `13824 = 2**9*3**3` + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 121.28 | 133.93 | 02-23 | +| 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | 59GB | 124.75 | 130.21 | 02-23 | +| 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | 54GB | 162.72 | 99.82 | 02-23 | +| | | | | | | | | | | | | + +NHEADS=108 breaks constraints for invoking optimized fused softmax kernel + + +NHIDDEN=13312 / NLAYERS=84 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 122.93 | 132.00 | 02-23 | +| 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | GB | 129.17 | 125.63 | 02-23 | +| | | | | | | | | | | | | + + +NHIDDEN=12288 / NLAYERS=96 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 177B | 8 | 8 | 6 | 4 | 64 | 2048 | GB | 122.96 | 128.69 | 02-24 | +| 48 | 177B | 8 | 8 | 6 | 4 | 96 | 2048 | GB | 145.40 | 108.83 | 02-24 | +| 48 | 177B | 8 | 8 | 6 | 4 | 128 | 2048 | GB | 129.42 | 122.27 | 02-24 | +| | | | | | | | | | | | | + + +## GBS Variations + +Note: A100s PCI-Express/NUMA was improved today so all TFLOPs have changed for the better (1-5%) - thus do not compare today's numbers to yesterday's. 
+ +NLAYERS=72 +NHIDDEN=14336 +NHEADS=64 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: | +| 48 | 181B | 8 | 8 | 6 | 4 | 1568 | 56GB | 113.01 | 109.22 | 02-25 | +| 48 | 181B | 8 | 8 | 6 | 4 | 2048 | 55GB | 114.11 | 141.27 | 02-25 | +| 48 | 181B | 8 | 8 | 6 | 6 | 2016 | 66GB | 123.57 | 128.43 | 02-25 | +| 48 | 181B | 4 | 8 | 12 | 4 | 1568 | GB | 92.75 | 133.08 | 02-25 | +| 48 | 181B | 4 | 8 | 12 | 4 | 2048 | 49GB | 117.07 | 137.70 | 02-25 | +| 48 | 181B | 4 | 8 | 12 | 2 | 1568 | GB | 99.93 | 123.51 | 02-25 | +| 48 | 181B | 4 | 8 | 12 | 2 | 2048 | GB | 128.82 | 125.15 | 02-25 | +| | | | | | | | | | | | + +some more configs with lower PP: + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: | +| 48 | 181B | 6 | 8 | 8 | 4 | 2016 | 52GB | 113.16 | 140.24 | 02-25 | +| 48 | 181B | 12 | 8 | 4 | 2 | 2016 | 53GB | 125.52 | 126.43 | 02-25 | +| 48 | 181B | 12 | 8 | 4 | 4 | 2016 | 59GB | 114.81 | 138.22 | 02-25 | +| 48 | 181B | 24 | 8 | 2 | 1 | 2016 | 65GB | 145.45 | 109.11 | 02-25 | +| 48 | 181B | 24 | 8 | 2 | 2 | 2016 | 76GB | 136.13 | 116.58 | 02-25 | +| 48 | 181B | 48 | 8 | 1 | 1 | 2016 | OOM | | | 02-25 | +| | | | | | | | | | | | + +Tweaking TP for the first time from the TP=8 is best assumption. But if the model fits into smaller TP it should be faster! 
+ +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: | +| 48 | 181B | 8 | 4 | 12 | 4 | 2048 | 60GB | 111.89 | 144.08 | 02-25 | +| 48 | 181B | 8 | 4 | 12 | 2 | 2048 | 44GB | 110.48 | 145.92 | 02-25 | +| 48 | 181B | 8 | 4 | 12 | 2 | 2048 | 38GB | 113.54 | 141.99 | 02-25 | +| 48 | 181B | 16 | 4 | 6 | 4 | 2048 | 75GB | 117.11 | 137.66 | 02-25 | +| 48 | 181B | 16 | 4 | 6 | 2 | 2048 | 57GB | 111.71 | 144.31 | 02-25 | +| 48 | 181B | 16 | 2 | 12 | 2 | 2048 | 63GB | 112.50 | 143.30 | 02-25 | +| 48 | 181B | 32 | 2 | 6 | 2 | 2048 | OOM | | | 02-25 | +| 48 | 181B | 32 | 2 | 6 | 1 | 2048 | OOM | | | 02-25 | +| 48 | 181B | 8 | 2 | 24 | 1 | 2048 | 44GB | 119.53 | 134.88 | 02-25 | +| 48 | 181B | 8 | 2 | 24 | 2 | 2048 | 53GB | 122.75 | 131.33 | 02-25 | +| 48 | 181B | 4 | 4 | 24 | 1 | 2048 | GB | 130.60 | 123.44 | 02-25 | +| | | | | | | | | | | | + + +NHIDDEN=12288 / NLAYERS=96 + +| Nodes | Size | DP | TP | PP | MBS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | ---: | ---: | -----: | -----: | ----: | +| 48 | 177B | 8 | 1 | 48 | 1 | 2048 | 58GB | 142.17 | 111.30 | 02-25 | +| | | | | | | | | | | | + + +## Another round of NHEADS + +to retest with TP<8 variations + +NHIDDEN=13824 / NLAYERS=78 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 4 | 12 | 1 | 64 | 2048 | | 148.24 | 109.57 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 64 | 2048 | 48GB | 103.51 | 156.92 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 96 | 2048 | 48GB | 107.12 | 151.64 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 128 | 2048 | | 147.41 | 110.19 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 4 | 64 | 2048 | | 106.72 | 152.21 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 4 | 96 | 2048 | | 110.31 | 147.25 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 4 | 128 | 2048 | | 153.90 
| 105.54 | 02-26 | +| 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | | 118.12 | 137.51 | 02-26 | +| 48 | 182B | 8 | 8 | 6 | 4 | 128 | 2048 | | 156.84 | 103.56 | 02-26 | +| | | | | | | | | | | | | + +NHIDDEN=14336 / NLAYERS=72 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | ---: | ---: | -----: | -----: | ----: | +| 48 | 181B | 8 | 4 | 12 | 2 | 64 | 2048 | | 110.42 | 146.00 | 02-26 | +| 48 | 181B | 8 | 4 | 12 | 2 | 128 | 2048 | | 114.02 | 141.39 | 02-26 | +| 48 | 181B | 8 | 4 | 12 | 4 | 128 | 2048 | | 137.53 | 117.23 | 02-26 | +| 48 | 181B | 8 | 8 | 6 | 4 | 64 | 2048 | | 113.95 | 141.47 | 02-26 | +| 48 | 181B | 8 | 8 | 6 | 4 | 128 | 2048 | | 116.06 | 138.90 | 02-26 | +| | | | | | | | | | | | | + +NHIDDEN=13312 / NLAYERS=84 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | ---: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 4 | 12 | 2 | 64 | 2048 | | 103.82 | 156.46 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 4 | 64 | 2048 | | 113.21 | 143.34 | 02-26 | +| 48 | 182B | 8 | 8 | 6 | 2 | 64 | 2048 | | 129.61 | 125.21 | 02-26 | +| | | | | | | | | | | | | + +## Batchsize Warmup + +NHIDDEN=13824 / NLAYERS=78 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 4 | 12 | 2 | 96 | 512 | | 35.77 | 113.52 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 96 | 1024 | | 59.65 | 136.15 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 96 | 1536 | | 83.11 | 146.59 | 02-26 | +| 48 | 182B | 8 | 4 | 12 | 2 | 96 | 2048 | | 107.12 | 151.64 | 02-26 | +| | | | | | | | | | | | | + +## Re-do + +78/12=6.5 - so the last stage has 1 block, while the rest have 7 - which is uneven. So that config is not optimal as it wastes gpus. 
+ +NHIDDEN=13824 / NLAYERS=78 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 182B | 8 | 8 | 6 | 2 | 96 | 2048 | GB | 133.57 | 121.61 | 02-27 | +| 48 | 182B | 8 | 8 | 6 | 4 | 96 | 2048 | 59GB | 118.24 | 137.38 | 02-27 | +| 48 | 182B | 16 | 4 | 6 | 2 | 96 | 2048 | GB | | | 02-27 | +| 48 | 182B | 16 | 4 | 6 | 4 | 96 | 2048 | 75GB | 115.55 | 140.57 | 02-27 | +| | | | | | | | | | | | | + +HIDDEN=12288; NLAYERS=106; regex partition_method='type:transformer|embed') + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 195B | 8 | 4 | 12 | 2 | 96 | 2048 | 44GB | 112.69 | 154.86 | 02-27 | +| 48 | 195B | 8 | 4 | 12 | 2 | 64 | 2048 | GB | 110.96 | 157.27 | 02-27 | +| | | | | | | | | | | | | + +## Rebalancing layers + +Do not compare these numbers to the previous ones. For 2 reasons: + +- First, from now on the testing is happening with BF16 optimizer that was just written to accumulate gradients in fp32, so it is more memory heavy and is a bit slower - this is compared to fp16 which grad accumulates in fp16. The additional memory usage is 4bytes x params and it's not sharded across gpus. +- I implemented and enabled `--pp-partition-method 'type:transformer|embedding'` so we use 2 layers less, to match `2+nlayers*PP` math to get a perfect balance giving each embedding layer its own slot on par with transformer layers. This is because 250k embedding matrix takes as much space as a single transformer layer. 
+ +HIDDEN=12288; NLAYERS=106; Model size: 195B, ratio=115 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 195B | 8 | 4 | 12 | 2 | 64 | 2048 | 67GB | 116.54 | 149.75 | 02-28 | +| 48 | 195B | 8 | 4 | 12 | 2 | 96 | 2048 | 65GB | 118.79 | 146.90 | 02-28 | +| 48 | 195B | 8 | 4 | 12 | 2 | 128 | 2048 | 67GB | 121.42 | 143.73 | 02-28 | +| 48 | 195B | 8 | 4 | 12 | 4 | 96 | 2048 | 79GB | 120.34 | 145.01 | 02-28 | +| | | | | | | | | | | | | + + +HIDDEN=12288; NLAYERS=100; Model size: 184B, ratio=122 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 184B | 16 | 4 | 6 | 2 | 64 | 2048 | OOM | x | x | 02-28 | +| 48 | 184B | 16 | 4 | 6 | 1 | 64 | 2048 | OOM | x | x | 02-28 | +| 48 | 184B | 8 | 8 | 6 | 2 | 64 | 2048 | 61GB | 139.72 | 117.91 | 02-28 | +| 48 | 184B | 8 | 8 | 6 | 4 | 64 | 2048 | 72GB | 120.96 | 136.20 | 02-28 | +| | | | | | | | | | | | | + + +NHIDDEN=13312; NLAYERS=82; Model size: 178B, ratio=162 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 178B | 4 | 8 | 12 | 4 | 64 | 2048 | 52GB | 111.79 | 141.76 | 02-28 | +| 48 | 178B | 8 | 4 | 12 | 2 | 64 | 2048 | 63GB | 104.45 | 151.71 | 02-28 | +| 48 | 178B | 8 | 4 | 12 | 2 | 104 | 2048 | 62GB | 123.71 | 128.10 | 02-28 | +| 48 | 178B | 8 | 4 | 12 | 2 | 128 | 2048 | 60GB | 108.78 | 145.68 | 02-28 | +| 48 | 178B | 8 | 4 | 12 | 4 | 64 | 2048 | 74GB | 104.82 | 151.18 | 02-28 | +| | | | | | | | | | | | | + +NHIDDEN=13312; NLAYERS=94 Model size: 203B, ratio=141 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 
48 | 203B | 8 | 4 | 12 | 2 | 128 | 2048 | 67GB | 124.10 | 146.12 | 02-28 | +| | | | | | | | | | | | | + +NHIDDEN=14336; NLAYERS=70; Model size: 176B, ratio=204 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 176B | 4 | 8 | 12 | 2 | 64 | 2048 | 40GB | 121.63 | 128.92 | 02-28 | +| 48 | 176B | 8 | 4 | 12 | 2 | 64 | 2048 | 59GB | 102.03 | 153.68 | 02-28 | +| 48 | 176B | 8 | 4 | 12 | 2 | 112 | 2048 | 59GB | 104.50 | 150.05 | 02-28 | +| 48 | 176B | 8 | 4 | 12 | 2 | 128 | 2048 | 60GB | 105.89 | 148.08 | 02-28 | +| 48 | 176B | 8 | 4 | 12 | 4 | 64 | 2048 | 73GB | 102.27 | 153.33 | 02-28 | +| | | | | | | | | | | | | + +NHIDDEN=14336; NLAYERS=82; Model size: 206B, ratio=174 + +| Nodes | Size | DP | TP | PP | MBS | NHEADS | GBS | Mem | Sec/it | TFLOPs | Notes | +| ----: | ---: | -: | -: | -: | --: | -----: | --: | ---: | -----: | -----: | ----: | +| 48 | 206B | 8 | 4 | 12 | 2 | 128 | 2048 | OOM | | | 02-28 | +| | | | | | | | | | | | | + + + +(was quickly getting the memory snapshot with: `pdsh -w jean-zay-iam01 "source ~/.pdshrc; nvidia-smi"`) + + +## Hanging Issue + +Here we are dealing with 320-384 A100 GPUs working in ensemble. + +It appears that the system can't handle heavy NCCL traffic or something of sorts. It can handle less than 100B model over 40nodes (TP=8/PP=10/DP=4). It can handle 200B over 10 nodes. At 100B over 20-40 nodes random GPUs start not to respond and the whole system hangs until it times out. 
I was able to test with the same NHIDDEN and growing the model on the layer dimension:
+
+- 10 layers - 25B works
+- 20 layers - 50B works
+- 40 layers - 100B hangs after succeeding iteration 1
+
+I was just starting to diagnose on the hidden dimension and now 13/52 nodes are down and so I can't continue with this line of work, since 40 nodes gave me a reliable failure and 20 nodes is intermittent failure, so it's not good for diagnosing.
+
+This is for a single replica of 10 nodes with 200B model + 250k vocab.
+
+I think the failed nodes that crashed and didn't recover are high suspects for having internal problems. Even though when I tested in groups of 10 nodes everything was dandy - note - the same 200B model.
+One more data point - Deepspeed ZeRO shards data over all gpus - so the more GPUs are involved the more communication happens. This is totally orthogonal to DP.
+
+The next day:
+
+Most of the nodes have come back this morning so continuing the dimensional growing experiments.
+To remind, growing on the layer dimension and keeping hidden at `1024*14` worked until 40 layers were reached where it was hanging. So it couldn't handle 100B model in this dimension.
+Now I'm keeping the layers dimension frozen to 80 and growing the nhidden dimension, starting from `1024*4` - proving that it works and then incrementing the size until it hangs:
+
+- `1024*10` works (100B model)
+- `1024*12` hangs (145B model)
+
+So these 2 experiments both show that when the inter-node traffic exceeds a certain level - the system fails.
+
+So it's not the size of each `all_reduce`/`broadcast` packet since at full NHIDDEN but only 1/4 of layers everything is just fine.
+
+And BTW to get a quick success/failure indication I'm working with `GLOBAL_BATCH_SIZE=64` so PP is very inefficient, but it doesn't matter for the purpose of this experiment.
+
+Using `py-spy` on the processes to dump python call stacks I have derived the same story on each node:
+
+On each node with TP=8 - i.e.
each node is only TP - the same situation: (checked nodes 0 and 1 only) + +6 processes are in: + +``` +Thread 835990 (active): "MainThread" + train (megatron/training.py:915) + pretrain (megatron/training.py:187) + (pretrain_gpt.py:239) +``` +2 processes are in: +``` +Thread 835995 (active): "MainThread" + broadcast (torch/distributed/distributed_c10d.py:1191) + _aggregate_total_loss (deepspeed/runtime/pipe/engine.py:540) + train_batch (deepspeed/runtime/pipe/engine.py:330) + train_step (megatron/training.py:436) + train (megatron/training.py:851) + pretrain (megatron/training.py:187) + (pretrain_gpt.py:239) +``` + +so 6 processes finished `train_step` and now are trying to: +``` + torch.distributed.all_reduce( + done_cuda, op=torch.distributed.ReduceOp.MAX) +``` +but for some reason 2 processes never finished the `train_step` and are stuck broadcasting I presume to the other 6 processes, which have long gone. + +So this hanging happens partially in Deepspeed and partially in Megatron-LM, somehow processes get out of sync even though everything works just fine on a smaller scale. But the issue could be brought on by apex's `FusedAdam` as we have dealt with a serious issue in it as well a week earlier, but it could also be pytorch, NCCL or some internal system issue. It's very hard to find the cause. + +As I shared earlier the problem doesn't exist or goes away if either of 2 things happens: + +- the model is under 100B (short stack of layer or narrow hidden) and 20 or more nodes are used in a single job +- `CUDA_LAUNCH_BLOCKING=1` + +Topology is TP=8, PP=10, DP=4 + +It has been very difficult to work on diagnosing this issue since every time I run the hanging setup I would lose a few nodes and since I'm 10h behind JeanZay, nobody is around there to reboot the nodes. + +So first of all it appears that `CUDA_LAUNCH_BLOCKING=1` removes the hanging issue and I did several performance checks and it surprisingly has no impact on this framework at this scale. 
Normally, it should make things much slower as it makes CUDA ops synchronous. + +### py-spying all processes + +After discussing this issue with Samyam I first run `py-spy` on all processes, but alas several processes weren't responding, so we had no idea how to tell where they were hanging. + +For posterity here is the process: + + +In one console, first allocate the gpus: +``` +salloc --partition=gpu_p5 --constraint=a100 --reservation=hug --nodes=2 --ntasks-per-node=1 --cpus-per-task=64 --hint=nomultithread --gres=gpu:8 --time 20:00:00 --account=six@a100 +``` +We are doing that so that if SLURM kills the processes we could still access those. + +Now run the training job, which calls the main `srun` with all the gpus: +``` +bash 200B-n40-bf16-mono.slurm +``` + +Wait till the program hangs. + +Now in another console get the `SLURM_JOBID` (or get it from `salloc` log): +``` +squeue -u `whoami` -o "%.16i %.9P %.26j %.8T %.10M %.8l %.6D %.20S %R" +``` + +Adjust jobid with `SLURM_JOBID` from above: +``` +srun --jobid=2180718 --gres=gpu:0 --nodes=40 --tasks-per-node=1 --output=trace-%N.out sh -c 'ps aux | grep python | egrep -v "grep|srun" | grep `whoami` | awk "{print \$2}" | xargs -I {} py-spy dump --native --pid {}' || echo "failed" +``` + +Must use `--gres=gpu:0` for the monitor `srun` or otherwise it will block until the first `srun` exits + +I also attempted using `pdsh` via `ds_ssh`, but somehow I wasn't able to run `py-spy` remotely - the main issue was that remote `ssh` command wasn't giving the same env as when I was logged in interactively via `ssh`. 
But if you have `sudo` access on the compute nodes than you could do: + +First prepare `hostfile`: +``` +function makehostfile() { +perl -e '$slots=split /,/, $ENV{"SLURM_STEP_GPUS"}; +$slots=8 if $slots==0; # workaround 8 gpu machines +@nodes = split /\n/, qx[scontrol show hostnames $ENV{"SLURM_JOB_NODELIST"}]; +print map { "$b$_ slots=$slots\n" } @nodes' +} +makehostfile > hostfile +``` + +Now run the `py-spy` extraction command over all nodes: +``` +ds_ssh -f hostfile "source ~/.pdshrc; ps aux | grep python | grep -v grep | grep `whoami` | awk '{print \$2}' | xargs -I {} sudo py-spy dump --pid {} " +``` + +### python trace + +So next came the idea of tracing all calls like one does with `strace(1)`, I researched python calls tracing facilities and have discovered that python has a `trace` sub-system. + +This code will trace all python calls and log them to the console and into a dedicated per process log file, via a custom `Tee` module I added. + +This then can help to understand where some processes stopped responding, since we will have the log of the last call before it went unresponsive. + +``` +$ cat pretrain_gpt.py +[...] + +def main(): + pretrain(train_valid_test_datasets_provider, model_provider, forward_step, + args_defaults={'tokenizer_type': 'GPT2BPETokenizer'}) + +import re +class Tee: + """ + A helper class to tee print's output into a file. 
+ Usage: + sys.stdout = Tee(filename) + """ + + def __init__(self, filename): + self.stdout = sys.stdout + self.file = open(filename, "a") + + def __getattr__(self, attr): + return getattr(self.stdout, attr) + + def write(self, msg): + self.stdout.write(msg) + self.file.write(msg) + self.file.flush() + + def flush(self): + self.stdout.flush() + self.file.flush() + +if __name__ == "__main__": + + import sys + import trace + import socket + import os + + # enable to trace + if 0: + cwd = os.path.realpath('.') + pid = os.getpid() + hostname = socket.gethostname() + local_rank = int(os.environ["LOCAL_RANK"]) + trace_output_file = f"{cwd}/trace-{hostname}-{local_rank}-{pid}.txt" + + # create a Trace object, telling it what to ignore, and whether to + # do tracing or line-counting or both. + tracer = trace.Trace( + ignoredirs=[sys.prefix, sys.exec_prefix], + trace=1, + count=1, + ) + # outfile=trace_output_file) + + # run the new command using the given tracer + sys.stdout = Tee(trace_output_file) + tracer.run('main()') + else: + main() + +``` + +This code doesn't require any special handing other than enabling the trace by changing `if 0` to `if 1`. + +Of course, this will now dump all python calls. I was worried that the slowdown will mask the issue causing the hanging, but surprisingly it didn't. + +I got 14GB (!) of data logged of just python calls from 320 processes. + +In retrospect I probably should have started the tracing at a later place, probably just before `train_step` - otherwise we have gotten a lot of useless traces of the dataloader and other preliminary code. + +I wish I could tell `trace` which packages to follow, but alas it only supports dirs to ignore, which is much more difficult to set, and thus you end up with a lot more data than one needs. But still this is a super useful tool for debugging hanging processes. + + +### To be continued + +We needed to do some more tweaks to get to the root of it. 
+
+Unfortunately I had to pause here, since I had to switch to testing the final version of the code and I couldn't risk losing nodes.
+
+With the `CUDA_LAUNCH_BLOCKING=1` workaround providing a robust solution we will use that for the time being.
+
+# a few preliminary runs
+
+
+## main-1
+
+While the final data is being cleaned up we are doing a few preliminary runs with data that still has some issues.
+
+GBS ramp up of `--rampup-batch-size 16 16 9_765_625` - the first few stages starting with GBS=16 are really slow (8 TFLOPs). The pipeline doesn't have enough data to even fill all the stages once, so it's super inefficient and it'll take days until we start hitting 100 TFLOPs.
+
+But there were no spikes during this brief experiment.
+
+
+
+## main-2
+
+Trying `--rampup-batch-size 384 16 9_765_625` since 384 is the first GBS where the pipe is filled up fully for the first time. `12*2*4=384` (`PP*MBS*DP`). The throughput starts at 100 TFLOPs right away (and it should be 150 TFLOPS once we reach GBS=2048).
+
+Found a bug: tied weights weren't getting reduced - was getting a spike on restart, fixed at
+https://github.com/microsoft/DeepSpeed/pull/1801/commits/37011a92bad42b07c2cb742751873ef7073d84b8
+
+So only the front embed matrix grad updates were making it in; the end ones were ignored.
+
+Will do a totally new run to compare that it's similar or better.
+
+
+
+
+## main-3
+
+Trying the rebased to master version 61d51fd62141ddb51b629b785af256fac407e048 and it has serious issues - the learning is much much slower
+
+## main-4
+
+So rolling back `olruwase/bf16-updates` branch to the fix:
+
+37011a92bad42b07c2cb742751873ef7073d84b8 Reduce tied weight gradients
+
+This time the learning is just a tad slower than main-2, so either deepspeed@master introduced some regression or the merge didn't go well.
+
+additionally going to try the latest checkpoint from `main-3` as it's more progressive to check `main-4` for spikes.
+
+same spike.
+ +After analysis the module weights, it's clear we have 2 distinct issues: +1. `module.tied_modules.embed.word_embeddings.norm` is not taken care of at all in pp rank -1 and they get reset to defaults on load +2. `module.tied_modules.embed.word_embeddings.weight` is mismatching on pp rank -1 between after iteration of last step before save and before iteration of first step after load + +This was derived with this debug instrumentation: + +``` +diff --git a/megatron/training.py b/megatron/training.py +index fd65ae9..fd76d28 100644 +--- a/megatron/training.py ++++ b/megatron/training.py +@@ -58,6 +58,23 @@ from megatron.data.dataset_utils import analyze_data_prefix + + import deepspeed + ++def dump_weights(preamble, iteration, model): ++ ++ import os ++ import socket ++ hostname = socket.gethostname() ++ pid = os.getpid() ++ ++ tp_rank = mpu.get_tensor_model_parallel_rank() ++ pp_rank = mpu.get_pipeline_model_parallel_rank() ++ dp_rank = mpu.get_data_parallel_rank() ++ global_rank = torch.distributed.get_rank() ++ ++ fn = f"debug-{iteration}-pp{pp_rank}-tp{tp_rank}-dp{dp_rank}-global{global_rank}-{preamble}-{pid}.txt" ++ #print(fn) ++ with open(fn, "w") as fh: ++ for n, p in model[0].named_parameters(): ++ fh.write(f"{n}={p}\n") + + def print_datetime(string): + """Note that this call will sync across all ranks.""" +@@ -426,6 +443,8 @@ def setup_model_and_optimizer(model_provider_func): + if args.fp16: + optimizer.reload_model_params() + ++ #optimizer.update_lp_params() ++ + return model, optimizer, lr_scheduler + + +@@ -848,12 +867,18 @@ def train(forward_step_func, model, optimizer, lr_scheduler, + args.pipeline_model_parallel_size >= 1: + args.curriculum_seqlen = args.curriculum_scheduler.update_difficulty( \ + args.iteration + 1) ++ ++ dump_weights("before-iteration", iteration+1, model) ++ + loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = \ + train_step(forward_step_func, + train_data_iterator, + model, + optimizer, + lr_scheduler) ++ ++ 
dump_weights("after-iteration", iteration+1, model) ++ + iteration += 1 + args.iteration = iteration + new_samples = mpu.get_data_parallel_world_size() * \ +``` + +and then +1. run 5 iterations and saved checkpoint, then run: +``` +mkdir a; mv debug-* a +``` +2. restarted and run a few iterations, then run: + +``` +mkdir b; mv debug-* b +``` + +I basically dumped weights for all ranks before and after train_step + +Now let's compared them all. Comparing: +1. the after iteration of the last step before save (iteration 805 in this example) +2. the before iteration step after the load (on restart) (iteration 806 in this example) + +with the help of: +``` +perl -le 'print qx[diff -u a/debug-805-*global$_-after-iteration-*.txt b/debug-806-*-global$_-before-iteration-*.txt] for 0..383' +``` + +Result: all `a/debug-805-pp11-*-after-iteration-*.txt` and corresponding `b/debug-806-pp11-*-before-iteration-*.txt` mismatch. + +so here is a sample diff: +``` +--- a/debug-805-pp11-tp1-dp4-global369-after-iteration-377074.txt 2022-03-06 05:44:06.074835000 +0100 ++++ b/debug-806-pp11-tp1-dp4-global369-before-iteration-378990.txt 2022-03-06 05:48:24.842635000 +0100 +@@ -1,21 +1,15 @@ + module.tied_modules.embed.word_embeddings.weight=Parameter containing: +-tensor([[-3.1090e-04, 4.6082e-03, -2.3499e-03, ..., -1.1292e-02, +- 2.1667e-03, -2.7313e-03], +- [-1.1353e-02, 9.9487e-03, -1.9684e-03, ..., -5.4550e-04, +- -2.3460e-04, 4.2114e-03], +- [ 3.2806e-03, -3.4332e-04, -5.5847e-03, ..., 7.6294e-03, +- 1.7853e-03, 2.5868e-05], ++tensor([[-0.0006, 0.0046, -0.0024, ..., -0.0114, 0.0014, -0.0030], ++ [-0.0109, 0.0096, -0.0020, ..., -0.0005, -0.0001, 0.0041], ++ [ 0.0027, -0.0004, -0.0056, ..., 0.0070, 0.0017, 0.0003], + ..., +- [ 1.6098e-03, 4.1809e-03, -2.4567e-03, ..., -4.6692e-03, +- -4.5776e-03, 1.7090e-03], +- [ 5.7373e-03, 3.5858e-03, -1.7471e-03, ..., 2.3041e-03, +- -6.4392e-03, 1.0223e-03], +- [-1.6937e-03, -1.4038e-02, 2.1057e-03, ..., -3.6011e-03, +- 1.3275e-03, -5.8594e-03]], 
device='cuda:1', dtype=torch.bfloat16, +- requires_grad=True) ++ [ 0.0018, 0.0039, -0.0026, ..., -0.0051, -0.0043, 0.0016], ++ [ 0.0051, 0.0039, -0.0015, ..., 0.0027, -0.0063, 0.0008], ++ [-0.0018, -0.0142, 0.0021, ..., -0.0035, 0.0015, -0.0060]], ++ device='cuda:1', dtype=torch.bfloat16, requires_grad=True) + module.tied_modules.embed.word_embeddings.norm.weight=Parameter containing: +-tensor([0.9961, 0.9961, 0.9961, ..., 0.9961, 0.9961, 0.9961], device='cuda:1', +- dtype=torch.bfloat16, requires_grad=True) ++tensor([1., 1., 1., ..., 1., 1., 1.], device='cuda:1', dtype=torch.bfloat16, ++ requires_grad=True) + module.tied_modules.embed.word_embeddings.norm.bias=Parameter containing: + tensor([0., 0., 0., ..., 0., 0., 0.], device='cuda:1', dtype=torch.bfloat16, + requires_grad=True) +``` + + +## main-5 + +trying a new baseline with rampup starting from 192 + + + +## main-6 + +trying https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/260 - comparing with main-5 + +tracks exactly main-5 - merged. + + +## main-7 + +Running with https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/261 + +Don't allocate embed LN on pp rank -1, - different checkpoint + +still spikes on restart + + +# main-no-emb-norm + +disable `--embed-layernorm` completely, check if spikes on restart + +no spikes on restart + +## main-8 + +1. test https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/262 + +2. At 1438 switched to deepspeed@ab61edb02a137d91b61bd416b4e8d3eb287b0eba of olruwase/bf16-updates - let's see if it tracks still the previous runs - yes it does. + +So the restart spike's cause was this: the framework was putting `LayerNorm` that I added for the embedding layr into the wrong param group [here](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/dd06ea32e014d8db6cdaf5e6839071d6523ca83c/megatron/optimizer/__init__.py#L31-L45). 
+
+it should have been in `no_weight_decay_params` but ended up in `weight_decay_params` because in this module `LayerNorm` is an alias for `MixedFusedLayerNorm`, so `isinstance(module_, LayerNorm)` was `False`.
+
+So if we want to use `torch.nn.LayerNorm` we have to change the code above to additionally check for ` or isinstance(module_, torch.nn.LayerNorm)`.
+
+## main-9
+
+re-running with deepspeed@77b649d160c1cd86f33415e2a7deab50c45fba16 of olruwase/bf16-updates which fixed the tied-embedding desynchronization bug due to clip grads not running on the last pp rank for tied embeddings.
diff --git a/train/tr11-176B-ml/finetune.md b/train/tr11-176B-ml/finetune.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ba8360a4b1422e7abe4983d626144cdb739aab2
--- /dev/null
+++ b/train/tr11-176B-ml/finetune.md
@@ -0,0 +1,15 @@
+# Finetuning 176B
+
+Finetuning 176B at the end of the training might be necessary to ensure exact logits match between Megatron-DS trained model and HF model.
+For now, there are 2 main bottlenecks that are responsible for not giving 100% logits match between HF model and Megatron model
+
+## Diverging bottlenecks
+
+### TP merging strategy
+
+See [this issue](https://github.com/pytorch/pytorch/issues/76232). When merging TP ranks the logits exactness is lost. The idea would be to finetune the 176B model with TP=1
+
+### Use `torch_softmax` instead of `fused_softmax`
+
+`fused_softmax` and `torch_softmax` do not give the same results (i.e., `torch.testing.assert_allclose(atol=0.0, rtol=0.0)` does not pass). The main model could be finetuned with `torch_softmax`.
+See [this line](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/cb48bd2c8bc182fb9872f127ef7c2267fbf9cc2e/megatron/model/fused_softmax.py#L204) diff --git a/train/tr11-176B-ml/smaller_models/tr11b-1B3-ml.slurm b/train/tr11-176B-ml/smaller_models/tr11b-1B3-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..12b2048610d4845961f13146aafbaf817280ea7f --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11b-1B3-ml.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11b-1B3-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11b-1B3-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11b-1B3-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-1B3.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-1B3.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path 
$CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=2 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size 
$MICRO_BATCH_SIZE \ + --rampup-batch-size 192 16 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ 
+ $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml-continuation.slurm b/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml-continuation.slurm new file mode 100644 index 0000000000000000000000000000000000000000..70654ffde2ac73dc852905b985e1de8bc273d48f --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml-continuation.slurm @@ -0,0 +1,204 @@ +#!/bin/bash +#SBATCH --job-name=tr11c-2B5-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=32 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source /gpfsscratch/rech/six/commun/commun/experiments/muennighoff/muennighoffsmallmodels + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11c-2B5-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-2B5.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-2B5.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun 
--jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=30 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 1.6e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + 
--tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff 
--git a/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml.slurm b/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..b66f9c17c2cff3067b520c12f90ce0745530e430 --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11c-2B5-ml.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11c-2B5-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=32 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11c-2B5-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-2B5.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-2B5.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid 
--output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=4 +TP_SIZE=4 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=30 +NHIDDEN=2560 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 1.6e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type 
PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + 
--valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11d-760M-ml-continuation.slurm b/train/tr11-176B-ml/smaller_models/tr11d-760M-ml-continuation.slurm new file mode 100644 index 0000000000000000000000000000000000000000..edfd8c81abbd48d0bbb5429c643e3f40f375d9c5 --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11d-760M-ml-continuation.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11d-760M-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11d-760M-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11d-760M-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-760M.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-760M.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export 
HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=256 + +NLAYERS=24 +NHIDDEN=1536 +NHEADS=16 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2.5e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 16 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + 
--eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee
-a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11d-760M-ml.slurm b/train/tr11-176B-ml/smaller_models/tr11d-760M-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a183e44ab0034b0d2e4738e6c2e80260143562f3 --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11d-760M-ml.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11d-760M-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11d-760M-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11d-760M-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-760M.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-760M.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH 
+python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=2 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=256 + +NLAYERS=24 +NHIDDEN=1536 +NHEADS=16 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2.5e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 16 9_765_625 \ + --global-batch-size 
$GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + 
--train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11e-350M-ml-continuation.slurm b/train/tr11-176B-ml/smaller_models/tr11e-350M-ml-continuation.slurm new file mode 100644 index 0000000000000000000000000000000000000000..eab995c06248edcd0bbd51e562a4e44a7f1950df --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11e-350M-ml-continuation.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11e-350M-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11e-350M-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-350M.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-350M.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export 
HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=256 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 3.0e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + 
--eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee
-a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11e-350M-ml.slurm b/train/tr11-176B-ml/smaller_models/tr11e-350M-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1d4ba9f7aff7b434cc4210be1a4765298ea6ad1e --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11e-350M-ml.slurm @@ -0,0 +1,205 @@ +#!/bin/bash +#SBATCH --job-name=tr11e-350M-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=8 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11e-350M-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-350M.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-350M.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH 
+python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=256 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 3.0e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size 
$GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.run \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ + --rdzv_backend c10d \ + --max_restarts 0 \ + --tee 3 \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + 
--train-weighted-split-paths-path $TRAIN_DATA_PATH \ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml-continuation.slurm b/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml-continuation.slurm new file mode 100644 index 0000000000000000000000000000000000000000..8704d49ece443e1854431ff98ac6eb3557bd5e3c --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml-continuation.slurm @@ -0,0 +1,204 @@ +#!/bin/bash +#SBATCH --job-name=tr11f-6B3-ml +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +source /gpfsscratch/rech/six/commun/commun/experiments/muennighoff/muennighoffsmallmodels + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11f-6B3-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-6B3.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-6B3.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID 
bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +PP_SIZE=1 +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=1 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 1.2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --fp16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + --pad-vocab-size-to 250880 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ + --log-interval 1 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 1 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + 
--log-timers-to-tensorboard \ +    --log-batch-size-to-tensorboard \ +    --log-validation-ppl-to-tensorboard \ +    " + +ZERO_STAGE=1 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ +  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, +  "train_batch_size": $GLOBAL_BATCH_SIZE, +  "gradient_clipping": 1.0, +  "zero_optimization": { +    "stage": $ZERO_STAGE +  }, +  "fp16": { +    "enabled": true, +    "loss_scale": 0, +    "loss_scale_window": 500, +    "hysteresis": 2, +    "min_loss_scale": 1, +    "initial_scale_power": 12 +  }, +  "steps_per_print": 2000, +  "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ +    --deepspeed \ +    --deepspeed_config ${config_json} \ +    --zero-stage ${ZERO_STAGE} \ +    --deepspeed-activation-checkpointing \ +    " + +export LAUNCHER="python -u -m torch.distributed.run \ +    --nproc_per_node $GPUS_PER_NODE \ +    --nnodes $NNODES \ +    --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ +    --rdzv_backend c10d \ +    --max_restarts 0 \ +    --tee 3 \ +    " + +export CMD=" \ +    `pwd`/pretrain_gpt.py \ +    --tensor-model-parallel-size $TP_SIZE \ +    --pipeline-model-parallel-size $PP_SIZE \ +    $GPT_ARGS \ +    $OUTPUT_ARGS \ +    --save $CHECKPOINT_PATH \ +    --load $CHECKPOINT_PATH \ +    --train-weighted-split-paths-path $TRAIN_DATA_PATH \ +    --valid-weighted-split-paths-path $VALID_DATA_PATH \ +    --data-impl mmap \ +    --distributed-backend nccl \ +    $DEEPSPEED_ARGS \ +    " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git
a/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm b/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..431e68dca8867da9dcad87e607597c8870e15024 --- /dev/null +++ b/train/tr11-176B-ml/smaller_models/tr11f-6B3-ml.slurm @@ -0,0 +1,206 @@ +#!/bin/bash +#SBATCH --job-name=tr11f-6B3-ml +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=32 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH -C v100-32g +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11f-6B3-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits-6B3.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits-6B3.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH 
--split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=$SLURM_NNODES + +PP_SIZE=4 +TP_SIZE=4 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 + +NLAYERS=30 +NHIDDEN=4096 +NHEADS=32 +SEQ_LEN=2048 + +SAVE_INTERVAL=250 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 1.2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 32 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + 
--tokenizer-type PretrainedFromHF \ +    --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ +    --init-method-std 0.0048 \ +    --embed-layernorm \ +    --fp16 \ +    --seed 42 \ +    --position-embedding-type alibi \ +    --checkpoint-activations \ +    --abort-on-unmet-fused-kernel-constraints \ +    --pad-vocab-size-to 250880 \ +    $OPTIMIZER_ARGS \ +    $EXIT_OPTS \ +    " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ +    --log-interval 1 \ +    --save-interval $SAVE_INTERVAL \ +    --eval-interval 1000 \ +    --eval-iters 1 \ +    --tensorboard-dir $TENSORBOARD_PATH \ +    --tensorboard-queue-size 5 \ +    --log-timers-to-tensorboard \ +    --log-batch-size-to-tensorboard \ +    --log-validation-ppl-to-tensorboard \ +    " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ +  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, +  "train_batch_size": $GLOBAL_BATCH_SIZE, +  "gradient_clipping": 1.0, +  "zero_optimization": { +    "stage": $ZERO_STAGE +  }, +  "fp16": { +    "enabled": true, +    "loss_scale": 0, +    "loss_scale_window": 500, +    "hysteresis": 2, +    "min_loss_scale": 1, +    "initial_scale_power": 12 +  }, +  "steps_per_print": 2000, +  "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ +    --deepspeed \ +    --deepspeed_config ${config_json} \ +    --zero-stage ${ZERO_STAGE} \ +    --deepspeed-activation-checkpointing \ +    " + +export LAUNCHER="python -u -m torch.distributed.run \ +    --nproc_per_node $GPUS_PER_NODE \ +    --nnodes $NNODES \ +    --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ +    --rdzv_backend c10d \ +    --max_restarts 0 \ +    --tee 3 \ +    " + +export CMD=" \ +    `pwd`/pretrain_gpt.py \ +    --tensor-model-parallel-size $TP_SIZE \ +    --pipeline-model-parallel-size $PP_SIZE \ +    $GPT_ARGS \ +    $OUTPUT_ARGS \ +    --save $CHECKPOINT_PATH \ +    --load $CHECKPOINT_PATH \ +    --train-weighted-split-paths-path $TRAIN_DATA_PATH
\ + --valid-weighted-split-paths-path $VALID_DATA_PATH \ + --data-impl mmap \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +clear; srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr11-176B-ml/tr11-176B-ml.slurm b/train/tr11-176B-ml/tr11-176B-ml.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d128f0d485081fec26e104317231b7b691336e57 --- /dev/null +++ b/train/tr11-176B-ml/tr11-176B-ml.slurm @@ -0,0 +1,221 @@ +#!/bin/bash +#SBATCH --job-name=tr11-176B-ml +#SBATCH --partition=gpu_p5 +#SBATCH --constraint=a100 +#SBATCH --reservation=hug +#SBATCH --qos=qos_gpu-gc # up to 100h +#SBATCH --nodes=24 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=64 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:8 # number of gpus +#SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@a100 + +set -x -e + +#source $six_ALL_CCFRWORK/start-py38-pt110 +#source $six_ALL_CCFRWORK/start-py38-pt111 +source $six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience/train/tr11-176B-ml/start-tr11-176B-ml + +echo "START TIME: $(date)" + +variant=main + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$variant +REPO_PATH=$DATA_OUTPUT_PATH/tr11-176B-ml-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard/$variant +LOGS_PATH=$REPO_PATH/logs/$variant +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/Megatron-DeepSpeed +cd $MEGATRON_DEEPSPEED_REPO + +KILL_SWITCH_PATH=$MEGATRON_DEEPSPEED_REPO/kill-switch-tr11-176B-exp1 + +BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/tr11-176B-ml/bigscience +TRAIN_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/train-splits.txt +VALID_DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/valid-splits.txt +CATALOGUE_JSON_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +LOAD_RATIOS_SCRIPT=$BIGSCIENCE_REPO/data/catalogue/load_ratios_meg_ds_format.py +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split train --output-meg-ds-ratio-file $TRAIN_DATA_PATH +python $LOAD_RATIOS_SCRIPT --dataset-ratios-path $CATALOGUE_JSON_PATH --split valid --output-meg-ds-ratio-file $VALID_DATA_PATH + +TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles + +# defining the right environment variables +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export 
HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +# testing for potential faulty nodes +# srun --jobid $SLURM_JOB_ID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=8 +NNODES=$SLURM_NNODES + +TP_SIZE=4 +PP_SIZE=12 + +MICRO_BATCH_SIZE=2 # was MBS=1 till GBS=784 +GLOBAL_BATCH_SIZE=2048 # 4.2M tokens. It is larger than the initial plan of 3.2M tokens to get higher throughput + +NHIDDEN=14336 +NLAYERS=70 +NHEADS=112 +SEQ_LEN=2048 + +SAVE_INTERVAL=100 + +TRAIN_SAMPLES=220_000_000 # 450B tokens +LR_DECAY_SAMPLES=200_000_000 # Decay for the first 410B tokens then continue at fixed --min-lr +LR_WARMUP_SAMPLES=183_105 # 375M tokens + + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 6e-5 \ + --min-lr 6e-6 \ + --lr-decay-style cosine \ + --lr-decay-samples $LR_DECAY_SAMPLES \ + --lr-warmup-samples $LR_WARMUP_SAMPLES \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " +# for 20h 1190, for 100h 5990 +# --exit-duration-in-mins 1190 \ +EXIT_OPTS=" \ + --exit-duration-in-mins 5990 \ + " + +GPT_ARGS=" \ + --pp-partition-method 'type:transformer|embedding' \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --rampup-batch-size 192 16 9_765_625 \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_SAMPLES \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \ + --init-method-std 0.0048 \ + --embed-layernorm \ + --sync-tp-duplicated-parameters \ + --bf16 \ + --seed 42 \ + --position-embedding-type alibi \ + --checkpoint-activations \ + --abort-on-unmet-fused-kernel-constraints \ + 
--kill-switch-path $KILL_SWITCH_PATH \ +    --pad-vocab-size-to 250880 \ +    $OPTIMIZER_ARGS \ +    $EXIT_OPTS \ +    " + +# TODO: decide on efficient eval-interval + eval-iters + +OUTPUT_ARGS=" \ +    --log-interval 1 \ +    --save-interval $SAVE_INTERVAL \ +    --eval-interval 1000 \ +    --eval-iters 1 \ +    --tensorboard-dir $TENSORBOARD_PATH \ +    --tensorboard-queue-size 5 \ +    --log-timers-to-tensorboard \ +    --log-batch-size-to-tensorboard \ +    --log-validation-ppl-to-tensorboard \ +    " + +ZERO_STAGE=0 # important: bf16 must use z0! it implements its own zero stage 1 equivalent + +config_json="./ds_config.$SLURM_JOB_ID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ +  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, +  "train_batch_size": $GLOBAL_BATCH_SIZE, +  "gradient_clipping": 1.0, +  "zero_optimization": { +    "stage": $ZERO_STAGE +  }, +  "bf16": { +    "enabled": true +  }, +  "steps_per_print": 2000, +  "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ +    --deepspeed \ +    --deepspeed_config ${config_json} \ +    --zero-stage ${ZERO_STAGE} \ +    --deepspeed-activation-checkpointing \ +    " + +export LAUNCHER="python -u -m torch.distributed.run \ +    --nproc_per_node $GPUS_PER_NODE \ +    --nnodes $NNODES \ +    --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \ +    --rdzv_backend c10d \ +    --max_restarts 0 \ +    --tee 3 \ +    " + +# --universal-checkpoint \ +export CMD=" \ +    `pwd`/pretrain_gpt.py \ +    --tensor-model-parallel-size $TP_SIZE \ +    --pipeline-model-parallel-size $PP_SIZE \ +    $GPT_ARGS \ +    $OUTPUT_ARGS \ +    --save $CHECKPOINT_PATH \ +    --load $CHECKPOINT_PATH \ +    --train-weighted-split-paths-path $TRAIN_DATA_PATH \ +    --valid-weighted-split-paths-path $VALID_DATA_PATH \ +    --num-workers 2 \ +    --valid-num-workers 0 \ +    --data-impl mmap \ +    --distributed-backend nccl \ +    $DEEPSPEED_ARGS \ +    " + +echo $CMD + +# do not remove or the training will hang and nodes will be lost w/o this workaround +export CUDA_LAUNCH_BLOCKING=1 + +# hide
duplicated errors using this hack - will be properly fixed in pt-1.12 +export TORCHELASTIC_ERROR_FILE=/tmp/torch-elastic-error.json + +# force crashing on nccl issues like hanging broadcast +export NCCL_ASYNC_ERROR_HANDLING=1 + +# srun error handling: +# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks +# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code +SRUN_ARGS=" \ + --wait=60 \ + --kill-on-bad-exit=1 \ + " + +clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" diff --git a/train/tr3-1B3-baseline/README.md b/train/tr3-1B3-baseline/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2266969a2ef2d2cfa8ded9a38b5830fc9a036eb9 --- /dev/null +++ b/train/tr3-1B3-baseline/README.md @@ -0,0 +1,27 @@ +# Arch/Scaling baselines (tr3) + +This folder contains the training scripts for the architecture and scaling baseline runs: no fancy tricks, just GPT2. 
Here are links to the respective tensorboards: + +| Size | 1B3 | 760M | 350M | 125M | +|--------------------- |----- |------ |------ |------ | +| C4 + low warmup | [a](https://huggingface.co/bigscience/tr3-1B3-modeling-baseline-tensorboard) | [b](https://huggingface.co/bigscience/tr3b-760M-modeling-baseline-tensorboard) | [c](https://huggingface.co/bigscience/tr3c-350M-modeling-baseline-tensorboard) | | +| OSCAR + low warmup | [f](https://huggingface.co/bigscience/tr3f-1B3-diagnostic2-low-warmup-oscar-tensorboard) | | | | +| C4 + high warmup | [e](https://huggingface.co/bigscience/tr3e-1B3-diagnostic1-warmup-c4-tensorboard) | | | | +| OSCAR + high warmup | **[d (current baseline)](https://huggingface.co/bigscience/tr3d-1B3-more-warmup-tensorboard)** | [g](https://huggingface.co/bigscience/tr3g-760M-v2-tensorboard) | [h](https://huggingface.co/bigscience/tr3h-350M-v2-tensorboard) | [i](https://huggingface.co/bigscience/tr3i-125M-v2-tensorboard) | +| Pile + high warmup | [m](https://huggingface.co/bigscience/tr3m-1B3-pile-tensorboard) | [j](https://huggingface.co/bigscience/tr3j-760M-pile-tensorboard) | [k](https://huggingface.co/bigscience/tr3k-350M-pile-tensorboard) | [l](https://huggingface.co/bigscience/tr3l-125M-pile-tensorboard) | + + + +# emb-norm + +a full re-run of `tr3m-1B3-pile-tensorboard` with `--embed-layernorm` enabled + +[script](tr3m-1B3-emb-norm-pile.slurm) + +results: + +- added `emb-norm` to https://huggingface.co/bigscience/tr3m-1B3-pile-tensorboard/tensorboard and moved the original run to `base`. Also upgraded the old TB to the new format so the new graph names match. 
+ +- full standalone repo: https://huggingface.co/bigscience/tr3m-1B3-emb-norm-pile-logs/ with logs + +- last checkpoint saved in `$six_ALL_CCFRSTORE/checkpoints/tr3m-1B3-emb-norm-pile` diff --git a/train/tr3-1B3-baseline/tar_experiments.slurm b/train/tr3-1B3-baseline/tar_experiments.slurm new file mode 100644 index 0000000000000000000000000000000000000000..cd8ad14693a4ba6d1966ed1a9db8e19682994076 --- /dev/null +++ b/train/tr3-1B3-baseline/tar_experiments.slurm @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH --job-name=tar_experiments # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --qos=qos_cpu-t3 +#SBATCH --output=../%x-%j.out # output file name +#SBATCH --account=six@cpu +#SBATCH --partition=cpu_p1 + +for filename in *; do + tar -cvf "$filename.tar" "$filename" + mv "$filename.tar" $ALL_CCFRSTORE/arch_scaling_experiments_store/"$filename.tar" +done diff --git a/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-logs.slurm b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-logs.slurm new file mode 100644 index 0000000000000000000000000000000000000000..b8c29c16fa31294871ae245de9dc5798e2e05779 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-logs.slurm @@ -0,0 +1,21 @@ +#!/bin/bash +#SBATCH --job-name=tr3-1B3-hub-sync-logs # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=prepost + +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full +LOGS_PATH=$DATA_OUTPUT_PATH/logs 
+BIG_SCIENCE_REPO_PATH=$SCRATCH/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.out' -d + +echo "END TIME: $(date)" diff --git a/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm new file mode 100644 index 0000000000000000000000000000000000000000..5ca8f93d672673a8177c48c298ecf53e881b928e --- /dev/null +++ b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline-hub-sync-tensorboard.slurm @@ -0,0 +1,22 @@ +#!/bin/bash +#SBATCH --job-name=tr3-1B3-hub-sync-tensorboard # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=prepost + +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +BIG_SCIENCE_REPO_PATH=$SCRATCH/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d + +echo "END TIME: $(date)" + diff --git a/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline.slurm b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline.slurm new file mode 100644 index 0000000000000000000000000000000000000000..5a0328e35b793c7fbe03c1c1098082a6907a18a3 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3-1B3-modeling-baseline.slurm @@ -0,0 +1,182 @@ +#!/bin/bash +#SBATCH --job-name=1B3-full.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$SCRATCH/synched_exps/tr3-1B3-full/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +if [[ ${TESTING} == 1 ]]; then + # testing on 10k + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document +else + # production on full 304M records + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document + +fi + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=16 +GLOBAL_BATCH_SIZE=1024 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 1e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + 
--lr-decay-samples 126_953_125 \ +    --lr-warmup-samples 183_105 \ +    --clip-grad 1.0 \ +    --weight-decay 1e-1 \ +    " + +EXIT_OPTS=" \ +    --exit-duration-in-mins 1190 \ +    " + +GPT_ARGS=" \ +    --num-layers $NLAYERS \ +    --hidden-size $NHIDDEN \ +    --num-attention-heads $NHEADS \ +    --ffn-hidden-size $FFN_HIDDEN_SIZE \ +    --seq-length $SEQ_LEN \ +    --max-position-embeddings $SEQ_LEN \ +    --micro-batch-size $MICRO_BATCH_SIZE \ +    --global-batch-size $GLOBAL_BATCH_SIZE \ +    --train-samples $TRAIN_ITER \ +    --tokenizer-type PretrainedFromHF \ +    --tokenizer-name-or-path t5-small \ +    --loss-scale 12 \ +    --clip-grad 1.0 \ +    --fp16 \ +    --checkpoint-activations \ +    $OPTIMIZER_ARGS \ +    $EXIT_OPTS \ +    " + +OUTPUT_ARGS=" \ +    --log-interval 200 \ +    --save-interval $SAVE_INTERVAL \ +    --eval-interval 1000 \ +    --eval-iters 100 \ +    --tensorboard-dir $OUTPUT_PATH/tensorboard \ +    --tensorboard-queue-size 5 \ +    --log-timers-to-tensorboard \ +    --log-batch-size-to-tensorboard \ +    --log-validation-ppl-to-tensorboard \ +    " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ +  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, +  "train_batch_size": $GLOBAL_BATCH_SIZE, +  "gradient_clipping": 1.0, +  "zero_optimization": { +    "stage": $ZERO_STAGE +  }, +  "fp16": { +    "enabled": true, +    "loss_scale": 0, +    "loss_scale_window": 500, +    "hysteresis": 2, +    "min_loss_scale": 1, +    "initial_scale_power": 12 +  }, +  "steps_per_print": 2000, +  "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ +    --deepspeed \ +    --deepspeed_config ${config_json} \ +    --zero-stage ${ZERO_STAGE} \ +    --deepspeed-activation-checkpointing \ +    " + +export LAUNCHER="python -u -m torch.distributed.launch \ +    --nproc_per_node $GPUS_PER_NODE \ +    --nnodes $NNODES \ +    --master_addr $MASTER_ADDR \ +    --master_port $MASTER_PORT \ +    " + +export CMD=" \ +    `pwd`/pretrain_gpt.py \ +    --tensor-model-parallel-size $TP_SIZE \ +
--pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3-1B3-modeling-baseline.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3b-760M.slurm b/train/tr3-1B3-baseline/tr3b-760M.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1c321b16249fc2087357eaf5b663f2f60f545fee --- /dev/null +++ b/train/tr3-1B3-baseline/tr3b-760M.slurm @@ -0,0 +1,180 @@ +#!/bin/bash +#SBATCH --job-name=760M.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$SCRATCH/synched_exps/tr3b-760M/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +if [[ ${TESTING} == 1 ]]; then + # testing on 10k + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document +else + # production on full 304M records + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document + +fi + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=256 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=1536 +NHEADS=16 +FFN_HIDDEN_SIZE=6144 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2.5e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + 
--lr-decay-samples 126_953_125 \ +    --lr-warmup-samples 183_105 \ +    --clip-grad 1.0 \ +    --weight-decay 1e-1 \ +    " + +EXIT_OPTS=" \ +    --exit-duration-in-mins 1190 \ +    " + +GPT_ARGS=" \ +    --num-layers $NLAYERS \ +    --hidden-size $NHIDDEN \ +    --num-attention-heads $NHEADS \ +    --ffn-hidden-size $FFN_HIDDEN_SIZE \ +    --seq-length $SEQ_LEN \ +    --max-position-embeddings $SEQ_LEN \ +    --micro-batch-size $MICRO_BATCH_SIZE \ +    --global-batch-size $GLOBAL_BATCH_SIZE \ +    --train-samples $TRAIN_ITER \ +    --tokenizer-type PretrainedFromHF \ +    --tokenizer-name-or-path t5-small \ +    --loss-scale 12 \ +    --clip-grad 1.0 \ +    --fp16 \ +    $OPTIMIZER_ARGS \ +    $EXIT_OPTS \ +    " + +OUTPUT_ARGS=" \ +    --log-interval 200 \ +    --save-interval $SAVE_INTERVAL \ +    --eval-interval 1000 \ +    --eval-iters 100 \ +    --tensorboard-dir $OUTPUT_PATH/tensorboard \ +    --tensorboard-queue-size 5 \ +    --log-timers-to-tensorboard \ +    --log-batch-size-to-tensorboard \ +    --log-validation-ppl-to-tensorboard \ +    " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat <<EOT > $config_json +{ +  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, +  "train_batch_size": $GLOBAL_BATCH_SIZE, +  "gradient_clipping": 1.0, +  "zero_optimization": { +    "stage": $ZERO_STAGE +  }, +  "fp16": { +    "enabled": true, +    "loss_scale": 0, +    "loss_scale_window": 500, +    "hysteresis": 2, +    "min_loss_scale": 1, +    "initial_scale_power": 12 +  }, +  "steps_per_print": 2000, +  "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ +    --deepspeed \ +    --deepspeed_config ${config_json} \ +    --zero-stage ${ZERO_STAGE} \ +    " + +export LAUNCHER="python -u -m torch.distributed.launch \ +    --nproc_per_node $GPUS_PER_NODE \ +    --nnodes $NNODES \ +    --master_addr $MASTER_ADDR \ +    --master_port $MASTER_PORT \ +    " + +export CMD=" \ +    `pwd`/pretrain_gpt.py \ +    --tensor-model-parallel-size $TP_SIZE \ +    --pipeline-model-parallel-size $PP_SIZE \ +    $GPT_ARGS \ +    $OUTPUT_ARGS \ +
--save $OUTPUT_PATH/checkpoints \
+    --load $OUTPUT_PATH/checkpoints \
+    --data-path $DATA_PATH \
+    --data-impl mmap \
+    --split 949,50,1 \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+
+echo $CMD
+
+# to debug - add echo (it exits and prints what it would have launched)
+srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3b-760M-modeling-baseline.$SLURM_JOBID.out  # fix: was 'tee OUTPUT_PATH/...' (missing '$'), which logs to a literal ./OUTPUT_PATH dir
diff --git a/train/tr3-1B3-baseline/tr3c-350M.slurm b/train/tr3-1B3-baseline/tr3c-350M.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..029d2aef4e8ee4a3aecd576b659c4c69f36e12dc
--- /dev/null
+++ b/train/tr3-1B3-baseline/tr3c-350M.slurm
@@ -0,0 +1,180 @@
+#!/bin/bash
+#SBATCH --job-name=350M.slurm
+#SBATCH --qos=qos_gpu-t3
+#SBATCH --nodes=4
+#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+#SBATCH --cpus-per-task=40 # number of cores per tasks
+#SBATCH --hint=nomultithread # we get physical cores not logical
+#SBATCH --gres=gpu:4 # number of gpus
+#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+#SBATCH --output=%x-%j.out # output file name
+#SBATCH --error=%x-%j.out # error file name (same to watch just one file)
+#SBATCH --account=six@v100
+
+set -x -e
+
+
+ROUND=2
+TESTING=0
+
+OUTPUT_PATH=$SCRATCH/synched_exps/tr3c-350M/
+MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed
+
+if [[ ${TESTING} == 1 ]]; then
+    # testing on 10k
+    DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document
+else
+    # production on full 304M records
+    DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document
+
+fi
+
+source $six_ALL_CCFRWORK/start-prod
+export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+export 
HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=256 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --train-samples $TRAIN_ITER \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path t5-small \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + 
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json  # fix: was 'cat < $config_json' -- the heredoc must WRITE the config file, not read it
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+    $OUTPUT_ARGS \
+    --save $OUTPUT_PATH/checkpoints \
+    --load $OUTPUT_PATH/checkpoints \
+    --data-path $DATA_PATH \
+    --data-impl mmap \
+    --split 949,50,1 \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+
+echo $CMD
+
+# to debug - add echo (it exits and prints what it would have launched)
+srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3c-350M-modeling-baseline.$SLURM_JOBID.out  # fix: was 'tee OUTPUT_PATH/...' (missing '$'), which logs to a literal ./OUTPUT_PATH dir
diff --git a/train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm b/train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..1e664ab8cdf7615d45c78bda5d0f92bc56581d5f
--- /dev/null
+++ b/train/tr3-1B3-baseline/tr3d-1B3-more-warmup.slurm
@@ -0,0 +1,178 @@
+#!/bin/bash
+#SBATCH --job-name=1B3-v2.slurm
+#SBATCH --qos=qos_gpu-t3
+#SBATCH --nodes=16
+#SBATCH --ntasks-per-node=1 # crucial - 
only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3d-1B3-more-warmup/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + 
--lr-warmup-samples 183_105 \
+    --clip-grad 1.0 \
+    --weight-decay 1e-1 \
+    "
+
+EXIT_OPTS=" \
+    --exit-duration-in-mins 1190 \
+    "
+
+GPT_ARGS=" \
+    --num-layers $NLAYERS \
+    --hidden-size $NHIDDEN \
+    --num-attention-heads $NHEADS \
+    --ffn-hidden-size $FFN_HIDDEN_SIZE \
+    --seq-length $SEQ_LEN \
+    --max-position-embeddings $SEQ_LEN \
+    --micro-batch-size $MICRO_BATCH_SIZE \
+    --global-batch-size $GLOBAL_BATCH_SIZE \
+    --rampup-batch-size 32 32 2_000_000 \
+    --train-samples $TRAIN_ITER \
+    --vocab-file $VOCAB_FILE \
+    --merge-file $MERGE_FILE \
+    --loss-scale 12 \
+    --clip-grad 1.0 \
+    --fp16 \
+    --checkpoint-activations \
+    $OPTIMIZER_ARGS \
+    $EXIT_OPTS \
+    "
+
+OUTPUT_ARGS=" \
+    --log-interval 200 \
+    --save-interval $SAVE_INTERVAL \
+    --eval-interval 1000 \
+    --eval-iters 100 \
+    --tensorboard-dir $OUTPUT_PATH/tensorboard \
+    --tensorboard-queue-size 5 \
+    --log-timers-to-tensorboard \
+    --log-batch-size-to-tensorboard \
+    --log-validation-ppl-to-tensorboard \
+    "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json  # fix: was 'cat < $config_json' -- the heredoc must WRITE the config file, not read it
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+ 
--pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3d-1B3-more-warmup.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3d-1B3-oscar-training2.slurm b/train/tr3-1B3-baseline/tr3d-1B3-oscar-training2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..53ff6e4e9dc1f3211385d08797bbd15cab0f3c08 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3d-1B3-oscar-training2.slurm @@ -0,0 +1,184 @@ +#!/bin/bash +#SBATCH --job-name=tr3d-1B3-oscar.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3d-1B3-oscar +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr3d-1B3-oscar-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +# You need to git clone the Megatron-DeepSpeed +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 
+FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " +# TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current +# series of experiments consistent, especially speed-wise. 
Adding it once Tr6 and Tr7 are done
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json  # fix: was 'cat < $config_json' -- the heredoc must WRITE the config file, not read it
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+    $OUTPUT_ARGS \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --data-impl mmap \
+    --split 949,50,1 \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+
+echo $CMD
+
+# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr3-1B3-baseline/tr3e-1B3-c4-training2.slurm b/train/tr3-1B3-baseline/tr3e-1B3-c4-training2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..1ba66f2dd146b81c339ea66b9f9671774e2437c7 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3e-1B3-c4-training2.slurm @@ -0,0 +1,184 @@ +#!/bin/bash +#SBATCH --job-name=tr3e-1B3-c4.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3e-1B3-c4 +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr3e-1B3-c4-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +# You need to git clone the Megatron-DeepSpeed +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/c4_preprocessing/c4_en_train_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export 
TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# TODO: this is our base config for 1B3, edit PP/TP/batch size/model config if smaller or bigger +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + 
--log-validation-ppl-to-tensorboard \
+    "
+# TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current
+# series of experiments consistent, especially speed-wise. Adding it once Tr6 and Tr7 are done
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json  # fix: was 'cat < $config_json' -- the heredoc must WRITE the config file, not read it
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+    $OUTPUT_ARGS \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --data-impl mmap \
+    --split 949,50,1 \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+
+echo $CMD
+
+# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr3-1B3-baseline/tr3e-1B3-diagnostic1-warmup-c4.slurm b/train/tr3-1B3-baseline/tr3e-1B3-diagnostic1-warmup-c4.slurm new file mode 100644 index 0000000000000000000000000000000000000000..f925e4da87d6af061711be036b29e509d45869c1 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3e-1B3-diagnostic1-warmup-c4.slurm @@ -0,0 +1,176 @@ +#!/bin/bash +#SBATCH --job-name=1B3-diagnostic1.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3e-1B3-diagnostic1-warmup-c4 +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node 
+DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path t5-small \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + 
"loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3e-1B3-diagnostic1-warmup-c4.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3f-1B3-diagnostic2-low-warmup-oscar.slurm b/train/tr3-1B3-baseline/tr3f-1B3-diagnostic2-low-warmup-oscar.slurm new file mode 100644 index 0000000000000000000000000000000000000000..f475aadafa6e65ddf508a637337bfccc1da4d692 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3f-1B3-diagnostic2-low-warmup-oscar.slurm @@ -0,0 +1,177 @@ +#!/bin/bash +#SBATCH --job-name=1B3-diagnostic2.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3f-1B3-diagnostic2-low-warmup-oscar +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=16 +GLOBAL_BATCH_SIZE=1024 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 1e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 126_953_125 \ + 
--lr-warmup-samples 183_105 \
+    --clip-grad 1.0 \
+    --weight-decay 1e-1 \
+    "
+
+EXIT_OPTS=" \
+    --exit-duration-in-mins 1190 \
+    "
+
+GPT_ARGS=" \
+    --num-layers $NLAYERS \
+    --hidden-size $NHIDDEN \
+    --num-attention-heads $NHEADS \
+    --ffn-hidden-size $FFN_HIDDEN_SIZE \
+    --seq-length $SEQ_LEN \
+    --max-position-embeddings $SEQ_LEN \
+    --micro-batch-size $MICRO_BATCH_SIZE \
+    --global-batch-size $GLOBAL_BATCH_SIZE \
+    --train-samples $TRAIN_ITER \
+    --vocab-file $VOCAB_FILE \
+    --merge-file $MERGE_FILE \
+    --loss-scale 12 \
+    --clip-grad 1.0 \
+    --fp16 \
+    --checkpoint-activations \
+    $OPTIMIZER_ARGS \
+    $EXIT_OPTS \
+    "
+
+OUTPUT_ARGS=" \
+    --log-interval 200 \
+    --save-interval $SAVE_INTERVAL \
+    --eval-interval 1000 \
+    --eval-iters 100 \
+    --tensorboard-dir $OUTPUT_PATH/tensorboard \
+    --tensorboard-queue-size 5 \
+    --log-timers-to-tensorboard \
+    --log-batch-size-to-tensorboard \
+    --log-validation-ppl-to-tensorboard \
+    "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json  # fix: was 'cat < $config_json' -- the heredoc must WRITE the config file, not read it
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+ 
$OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3f-1B3-diagnostic2-low-warmup-oscar.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3g-760M-v2.slurm b/train/tr3-1B3-baseline/tr3g-760M-v2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..0150fbfaff42b35cc130a431ac6d173466efe0fc --- /dev/null +++ b/train/tr3-1B3-baseline/tr3g-760M-v2.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=760M-v2.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3g-760M-v2/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1536 +NHEADS=16 +FFN_HIDDEN_SIZE=6144 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2.5e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 
1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + 
$OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3g-760M-v2.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3h-350M-v2.slurm b/train/tr3-1B3-baseline/tr3h-350M-v2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a2bb6bd99153ff6355a4902ef0d45bbaa1f07977 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3h-350M-v2.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=350M-v2.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3h-350M-v2/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export 
TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + 
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3h-350M-v2.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3i-125M-v2.slurm b/train/tr3-1B3-baseline/tr3i-125M-v2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a2a025753948081a996e2b45aebcf2a47bbc982e --- /dev/null +++ b/train/tr3-1B3-baseline/tr3i-125M-v2.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=125M-v2.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3i-125M-v2/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=12 +NHIDDEN=768 +NHEADS=16 +FFN_HIDDEN_SIZE=3072 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 6e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 
\ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + 
$OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3i-125M-v2.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3j-760M-pile.slurm b/train/tr3-1B3-baseline/tr3j-760M-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..4ecee6daa098152f0be9a063d2ef374c14d5e4e8 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3j-760M-pile.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=760M-pile.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3j-760M-pile/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1536 +NHEADS=16 +FFN_HIDDEN_SIZE=6144 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2.5e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + 
--weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS 
\ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3j-760M-pile.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3k-350M-pile.slurm b/train/tr3-1B3-baseline/tr3k-350M-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..576244ceeef90da741b622c8bd4defdaeef42d3c --- /dev/null +++ b/train/tr3-1B3-baseline/tr3k-350M-pile.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=350M-pile.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3k-350M-pile/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export 
TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + 
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3k-350M-pile.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3l-125M-pile.slurm b/train/tr3-1B3-baseline/tr3l-125M-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..0292fadb8145cdf1664c075a758e33c9c750ff55 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3l-125M-pile.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=125M-pile.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3l-125M-pile/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=12 +NHIDDEN=768 +NHEADS=16 +FFN_HIDDEN_SIZE=3072 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 6e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + 
--weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS 
\ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3l-125M-pile.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile-optim-reset.slurm b/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile-optim-reset.slurm new file mode 100644 index 0000000000000000000000000000000000000000..63ac83ce2661cd232a14570fd4a5df86c45eb9ad --- /dev/null +++ b/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile-optim-reset.slurm @@ -0,0 +1,183 @@ +#!/bin/bash +#SBATCH --job-name=1B3-pile-emb-norm-optim-reset +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8b-104B/bigscience/train/tr8b-104B/start-tr8b-104B + +echo "START TIME: $(date)" + +ROUND=2 +TESTING=0 + +VARIANT=emb-norm-optim-reset + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3m-1B3-emb-norm-pile/ +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints/$VARIANT +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tr3m-1B3-pile-tensorboard/$VARIANT +LOGS_PATH=$DATA_OUTPUT_PATH/$VARIANT/logs +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8b-104B/Megatron-DeepSpeed-emb-norm + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=5_000_000 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + 
--seq-length $SEQ_LEN \
+ --max-position-embeddings $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --checkpoint-activations \
+ --fp16 \
+ --embed-layernorm \
+ --seed 1234 \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 1 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $TENSORBOARD_PATH \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ --log-level info \
+ --log-level-replica error \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "fp16": {
+ "enabled": true,
+ "loss_scale": 0,
+ "loss_scale_window": 500,
+ "hysteresis": 2,
+ "min_loss_scale": 1,
+ "initial_scale_power": 12
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+ --deepspeed \
+ --deepspeed_config ${config_json} \
+ --zero-stage ${ZERO_STAGE} \
+ --deepspeed-activation-checkpointing \
+ "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --master_addr $MASTER_ADDR \
+ --master_port $MASTER_PORT \
+ "
+
+export CMD=" \
+ `pwd`/pretrain_gpt.py \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ $GPT_ARGS \
+ $OUTPUT_ARGS \
+ --save $CHECKPOINT_PATH \
+ --load $CHECKPOINT_PATH \
+ --data-path $DATA_PATH \
+ --data-impl mmap \
+ --split 949,50,1 \
+ 
--distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $LOGS_PATH/main_log.txt + + +echo "END TIME: $(date)" diff --git a/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile.slurm b/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a5455be0cc216d3205bd23ece60f5238ec2d6007 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3m-1B3-emb-norm-pile.slurm @@ -0,0 +1,184 @@ +#!/bin/bash +#SBATCH --job-name=1B3-pile-emb-norm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + + +set -x -e + +source $six_ALL_CCFRWORK/start-prod + +echo "START TIME: $(date)" + +ROUND=2 +TESTING=0 + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3m-1B3-emb-norm-pile/ +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +LOGS_PATH=$DATA_OUTPUT_PATH/logs +mkdir -p $LOGS_PATH + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 + +MICRO_BATCH_SIZE=1 
+GLOBAL_BATCH_SIZE=512
+# TRAIN_ITER=146_484_375
+# x1.10
+TRAIN_ITER=161_132_812
+
+NLAYERS=24
+NHIDDEN=2048
+NHEADS=16
+FFN_HIDDEN_SIZE=8192
+SEQ_LEN=2048
+
+if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
+elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
+else echo "invalid ROUND: $ROUND"
+fi
+
+OPTIMIZER_ARGS=" \
+ --optimizer adam \
+ --adam-beta1 0.9 \
+ --adam-beta2 0.999 \
+ --adam-eps 1e-8 \
+ --lr 2e-4 \
+ --min-lr 1e-5 \
+ --lr-decay-style cosine \
+ --lr-decay-samples 73_242_187 \
+ --lr-warmup-samples 183_105 \
+ --clip-grad 1.0 \
+ --weight-decay 1e-1 \
+ "
+
+EXIT_OPTS=" \
+ --exit-duration-in-mins 1190 \
+ "
+
+GPT_ARGS=" \
+ --num-layers $NLAYERS \
+ --hidden-size $NHIDDEN \
+ --num-attention-heads $NHEADS \
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
+ --seq-length $SEQ_LEN \
+ --max-position-embeddings $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --checkpoint-activations \
+ --fp16 \
+ --embed-layernorm \
+ --seed 1234 \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 1 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $TENSORBOARD_PATH \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ --log-level info \
+ --log-level-replica error \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "fp16": {
+ "enabled": true,
+ "loss_scale": 0,
+ 
"loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $LOGS_PATH/main_log.txt + + +echo "END TIME: $(date)" diff --git a/train/tr3-1B3-baseline/tr3m-1B3-pile.slurm b/train/tr3-1B3-baseline/tr3m-1B3-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..bed8cb71e4beae549a15041ddb07cdb42c4b3d01 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3m-1B3-pile.slurm @@ -0,0 +1,178 @@ +#!/bin/bash +#SBATCH --job-name=1B3-pile.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr3m-1B3-pile/ +MEGATRON_DEEPSPEED_REPO=$SCRATCH/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + 
--weight-decay 1e-1 \
+ "
+
+EXIT_OPTS=" \
+ --exit-duration-in-mins 1190 \
+ "
+
+GPT_ARGS=" \
+ --num-layers $NLAYERS \
+ --hidden-size $NHIDDEN \
+ --num-attention-heads $NHEADS \
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
+ --seq-length $SEQ_LEN \
+ --max-position-embeddings $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --fp16 \
+ --checkpoint-activations \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 200 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "fp16": {
+ "enabled": true,
+ "loss_scale": 0,
+ "loss_scale_window": 500,
+ "hysteresis": 2,
+ "min_loss_scale": 1,
+ "initial_scale_power": 12
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+ --deepspeed \
+ --deepspeed_config ${config_json} \
+ --zero-stage ${ZERO_STAGE} \
+ --deepspeed-activation-checkpointing \
+ "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --master_addr $MASTER_ADDR \
+ --master_port $MASTER_PORT \
+ "
+
+export CMD=" \
+ `pwd`/pretrain_gpt.py \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ $GPT_ARGS \
+ $OUTPUT_ARGS 
\ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3m-1B3-pile.$SLURM_JOBID.out diff --git a/train/tr3-1B3-baseline/tr3n-1B3-pile-fancy.slurm b/train/tr3-1B3-baseline/tr3n-1B3-pile-fancy.slurm new file mode 100644 index 0000000000000000000000000000000000000000..e1a00277563999ccb7566d0e0900abbee865c972 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3n-1B3-pile-fancy.slurm @@ -0,0 +1,191 @@ +#!/bin/bash +#SBATCH --job-name=1B3-pile-fancy.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%a-%j.out # output file name +#SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 +#SBATCH --mail-type=ALL +#SBATCH --mail-user=victor@huggingface.co + +set -x -e + + +ROUND=2 +TESTING=0 + +EXPERIMENT_NAME=tr3n-1B3-pile-fancy +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/ +CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints +REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=64 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ ${ROUND} 
== 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
+elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
+else echo "invalid ROUND: $ROUND"
+fi
+
+OPTIMIZER_ARGS=" \
+ --optimizer adam \
+ --adam-beta1 0.9 \
+ --adam-beta2 0.999 \
+ --adam-eps 1e-8 \
+ --lr 2e-4 \
+ --min-lr 1e-5 \
+ --lr-decay-style cosine \
+ --lr-decay-samples 146_484_375 \
+ --lr-warmup-samples 183_105 \
+ --clip-grad 1.0 \
+ --weight-decay 1e-1 \
+ "
+
+EXIT_OPTS=" \
+ --exit-duration-in-mins 1190 \
+ "
+
+GPT_ARGS=" \
+ --num-layers $NLAYERS \
+ --hidden-size $NHIDDEN \
+ --num-attention-heads $NHEADS \
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
+ --seq-length $SEQ_LEN \
+ --max-position-embeddings $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --fp16 \
+ --checkpoint-activations \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 200 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $TENSORBOARD_PATH \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "fp16": {
+ "enabled": true,
+ "loss_scale": 0,
+ "loss_scale_window": 500,
+ "hysteresis": 2,
+ "min_loss_scale": 1,
+ "initial_scale_power": 12
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+ --deepspeed \
+ --deepspeed_config ${config_json} \
+ --zero-stage ${ZERO_STAGE} 
\ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr3-1B3-baseline/tr3o-1B3-pile-no-dropout.slurm b/train/tr3-1B3-baseline/tr3o-1B3-pile-no-dropout.slurm new file mode 100644 index 0000000000000000000000000000000000000000..d35de0a8a69ee09ccdb2d0113771d0fd0acec692 --- /dev/null +++ b/train/tr3-1B3-baseline/tr3o-1B3-pile-no-dropout.slurm @@ -0,0 +1,193 @@ +#!/bin/bash +#SBATCH --job-name=1B3-pile-no-dropout.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=64 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%a-%j.out # output file name +#SBATCH --error=%x-%a-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 +#SBATCH --mail-type=ALL +#SBATCH --mail-user=victor@huggingface.co + +set -x -e + + +ROUND=2 +TESTING=0 + +EXPERIMENT_NAME=tr3o-1B3-pile-no-dropout +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/$EXPERIMENT_NAME/ +CHECKPOINT_PATH=$OUTPUT_PATH/checkpoints +REPO_PATH=$OUTPUT_PATH/$EXPERIMENT_NAME-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=64 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +if [[ 
${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
+elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
+else echo "invalid ROUND: $ROUND"
+fi
+
+OPTIMIZER_ARGS=" \
+ --optimizer adam \
+ --adam-beta1 0.9 \
+ --adam-beta2 0.999 \
+ --adam-eps 1e-8 \
+ --lr 2e-4 \
+ --min-lr 1e-5 \
+ --lr-decay-style cosine \
+ --lr-decay-samples 146_484_375 \
+ --lr-warmup-samples 183_105 \
+ --clip-grad 1.0 \
+ --hidden-dropout 0.0 \
+ --attention-dropout 0.0 \
+ --weight-decay 1e-1 \
+ "
+
+EXIT_OPTS=" \
+ --exit-duration-in-mins 1190 \
+ "
+
+GPT_ARGS=" \
+ --num-layers $NLAYERS \
+ --hidden-size $NHIDDEN \
+ --num-attention-heads $NHEADS \
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
+ --seq-length $SEQ_LEN \
+ --max-position-embeddings $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --fp16 \
+ --checkpoint-activations \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 200 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $TENSORBOARD_PATH \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": $ZERO_STAGE
+ },
+ "fp16": {
+ "enabled": true,
+ "loss_scale": 0,
+ "loss_scale_window": 500,
+ "hysteresis": 2,
+ "min_loss_scale": 1,
+ "initial_scale_power": 12
+ },
+ "steps_per_print": 2000,
+ "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+ --deepspeed \
+ 
--deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# We create the folder where the logs and codecarbon will be stored. +mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr4-1B3-rotary/README.md b/train/tr4-1B3-rotary/README.md new file mode 100644 index 0000000000000000000000000000000000000000..647f58e917cba66fbe168802a33595aee59187c5 --- /dev/null +++ b/train/tr4-1B3-rotary/README.md @@ -0,0 +1,20 @@ +## Tr4c + +On the supercomputer, we prepared the folders +```bash +cd $six_ALL_CCFRSCRATCH/synched_exps/ +huggingface-cli repo create tr4c-1B3-rotary-oscar-logs --organization bigscience +huggingface-cli repo create tr4c-1B3-rotary-oscar-checkpoints --organization bigscience +mkdir tr4c-1B3-rotary-oscar +cd tr4c-1B3-rotary-oscar +git clone https://huggingface.co/bigscience/tr4c-1B3-rotary-oscar-logs +git clone https://huggingface.co/bigscience/tr4c-1B3-rotary-oscar-checkpoints +mv tr4c-1B3-rotary-oscar-checkpoints checkpoints +cd tr4c-1B3-rotary-oscar-logs +mkdir logs +``` + +And then launch the jobs: +``` +sbatch --array=1-11%1 
$SCRATCH/repos/bigscience/train/tr4-1B3-rotary/tr4c-1B3-oscar-modeling-rotary.slurm +``` \ No newline at end of file diff --git a/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-logs.slurm b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-logs.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a377abafeadf7c8eb727516b798449101fe9dc07 --- /dev/null +++ b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-logs.slurm @@ -0,0 +1,22 @@ +#!/bin/bash +#SBATCH --job-name=tr4-1B3-hub-sync-logs # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=prepost +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr4-1B3-rotary +LOGS_PATH=$DATA_OUTPUT_PATH/logs +BIG_SCIENCE_REPO_PATH=$DATA_OUTPUT_PATH/code/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $LOGS_PATH --patterns '*.out' -d + +echo "END TIME: $(date)" diff --git a/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-tensorboard.slurm b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-tensorboard.slurm new file mode 100644 index 0000000000000000000000000000000000000000..01120e38626866de4313f74e325b234ba84ad67c --- /dev/null +++ b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary-hub-sync-tensorboard.slurm @@ -0,0 +1,23 @@ +#!/bin/bash +#SBATCH --job-name=tr4-1B3-hub-sync-tensorboard # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH 
--partition=prepost +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr4-1B3-rotary +TENSORBOARD_PATH=$DATA_OUTPUT_PATH/tensorboard +BIG_SCIENCE_REPO_PATH=$DATA_OUTPUT_PATH/code/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $TENSORBOARD_PATH --patterns '*tfevents*' -d + +echo "END TIME: $(date)" + diff --git a/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary.slurm b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary.slurm new file mode 100644 index 0000000000000000000000000000000000000000..9c38a66a06ec35f185075c5cb33a29cf71f652e1 --- /dev/null +++ b/train/tr4-1B3-rotary/tr4-1B3-modeling-rotary.slurm @@ -0,0 +1,179 @@ +#!/bin/bash +#SBATCH --job-name=1B3-rotary.slurm +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 +#SBATCH --array=1-10%1 + +set -x -e +source $six_ALL_CCFRWORK/start-prod + +ROUND=2 +TESTING=0 + +# Prevent internet access +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr4-1B3-rotary +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +if [[ ${TESTING} == 1 ]]; then + # testing on 10k + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_100k_text_document +else + # production on full 304M records + DATA_PATH=$six_ALL_CCFRSCRATCH/datasets-custom/c4_preprocessing/c4_en_train_text_document + +fi + +pushd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS 
must be a multiple of PP_SIZE here
+TP_SIZE=4 # always fixed to the size of a single node
+DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer
+
+MICRO_BATCH_SIZE=8
+GLOBAL_BATCH_SIZE=512
+TRAIN_ITER=73_242_187 #150B tokens
+
+NLAYERS=24
+NHIDDEN=2048
+NHEADS=16
+FFN_HIDDEN_SIZE=8192
+SEQ_LEN=2048
+
+if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10
+elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500
+else echo "invalid ROUND: $ROUND"
+fi
+
+OPTIMIZER_ARGS=" \
+ --optimizer adam \
+ --adam-beta1 0.9 \
+ --adam-beta2 0.999 \
+ --adam-eps 1e-8 \
+ --lr 2e-4 \
+ --min-lr 1e-5 \
+ --lr-decay-style cosine \
+ --lr-decay-samples 73_242_187 \
+ --lr-warmup-samples 183_105 \
+ --clip-grad 1.0 \
+ --weight-decay 1e-1 \
+ "
+
+EXIT_OPTS=" \
+ --exit-duration-in-mins 1190 \
+ "
+
+GPT_ARGS=" \
+ --num-layers $NLAYERS \
+ --hidden-size $NHIDDEN \
+ --num-attention-heads $NHEADS \
+ --ffn-hidden-size $FFN_HIDDEN_SIZE \
+ --seq-length $SEQ_LEN \
+ --micro-batch-size $MICRO_BATCH_SIZE \
+ --global-batch-size $GLOBAL_BATCH_SIZE \
+ --rampup-batch-size 32 32 2_000_000 \
+ --train-samples $TRAIN_ITER \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path t5-small \
+ --loss-scale 12 \
+ --clip-grad 1.0 \
+ --fp16 \
+ --checkpoint-activations \
+ --position-embedding-type rotary \
+ $OPTIMIZER_ARGS \
+ $EXIT_OPTS \
+ "
+
+OUTPUT_ARGS=" \
+ --log-interval 200 \
+ --save-interval $SAVE_INTERVAL \
+ --eval-interval 1000 \
+ --eval-iters 100 \
+ --tensorboard-dir $OUTPUT_PATH/tensorboard \
+ --tensorboard-queue-size 5 \
+ --log-timers-to-tensorboard \
+ --log-batch-size-to-tensorboard \
+ --log-validation-ppl-to-tensorboard \
+ "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+ "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+ "train_batch_size": $GLOBAL_BATCH_SIZE,
+ "gradient_clipping": 
1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr3-1B3-modeling-baseline.$SLURM_JOBID.out diff --git a/train/tr4-1B3-rotary/tr4b-350M-modeling-rotary.slurm b/train/tr4-1B3-rotary/tr4b-350M-modeling-rotary.slurm new file mode 100644 index 0000000000000000000000000000000000000000..20c81ca36ce340c82797b9997dbd460eaee99e95 --- /dev/null +++ b/train/tr4-1B3-rotary/tr4b-350M-modeling-rotary.slurm @@ -0,0 +1,179 @@ +#!/bin/bash +#!/bin/bash +#SBATCH --job-name=350M-rotary.slurm +#SBATCH --nodes=4 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=logs/%x-%j.out # output file name +#SBATCH --account=six@v100 + +# This is compared with tr3h-350M-v2.slurm + +set -x -e + + +ROUND=2 +TESTING=0 + +OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr4b-350M-rotary +MEGATRON_DEEPSPEED_REPO=$OUTPUT_PATH/code/Megatron-DeepSpeed + +VOCAB_FILE=$OUTPUT_PATH/data/gpt2-vocab.json +MERGE_FILE=$OUTPUT_PATH/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# adjust depending on the number of the nodes + +# XXX: edit me +GPUS_PER_NODE=4 +NNODES=4 +PP_SIZE=2 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=1024 +NHEADS=16 +FFN_HIDDEN_SIZE=4096 +SEQ_LEN=2048 + +if [[ ${ROUND} == 1 ]]; then EXIT_INTERVAL=100 SAVE_INTERVAL=10 +elif [[ ${ROUND} == 2 ]]; then SAVE_INTERVAL=1500 +else echo "invalid ROUND: $ROUND" +fi + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 3e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + 
+EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --position-embedding-type rotary\ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $OUTPUT_PATH/tensorboard \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " + +ZERO_STAGE=1 + +config_json="./ds_config.$SLURM_JOBID.json" + +# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() +cat < $config_json +{ + "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE, + "train_batch_size": $GLOBAL_BATCH_SIZE, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": $ZERO_STAGE + }, + "fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save 
$OUTPUT_PATH/checkpoints \ + --load $OUTPUT_PATH/checkpoints \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + + +# # clear old checkpoint as it'd mismatch while we sort things out +# rm -rf $SAVE_CHECKPOINT_PATH + + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee $OUTPUT_PATH/logs/tr4b-350M.$SLURM_JOBID.out diff --git a/train/tr4-1B3-rotary/tr4c-1B3-oscar-modeling-rotary.slurm b/train/tr4-1B3-rotary/tr4c-1B3-oscar-modeling-rotary.slurm new file mode 100644 index 0000000000000000000000000000000000000000..fc790376f07d49bbc38a22cc198dfe274939bec5 --- /dev/null +++ b/train/tr4-1B3-rotary/tr4c-1B3-oscar-modeling-rotary.slurm @@ -0,0 +1,182 @@ +#!/bin/bash +#SBATCH --job-name=1B3-rotary-oscar.slurm +#SBATCH --qos=qos_gpu-t3 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
+#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.out # output file name +#SBATCH --error=/gpfsdswork/projects/rech/six/uue59kq/logs/%x-%j.err # error file name +#SBATCH --account=six@v100 + +set -x -e + +# TODO: modify these for your training setup, just Ctrl-F replace +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr4c-1B3-rotary-oscar +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr4c-1B3-rotary-oscar-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$SCRATCH/repos/Megatron-DeepSpeed + +# TODO: you may change the dataset, some examples are at tr3-1B3-baseline (tr3 = c4 + t5-tokenizer, tr3m = the Pile) +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# so processes know who to talk to +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +# TODO: this is our base config for 1B3, edit PP/TP/batch size/model config if smaller or bigger +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=4 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=4 # always fixed to the size of a single node +DP_SIZE=$((NNODES * GPUS_PER_NODE / (PP_SIZE * TP_SIZE))) # will get derived 
automatically by trainer + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=73_242_187 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 73_242_187 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --position-embedding-type rotary \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 1000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ + --tensorboard-queue-size 5 \ + --log-timers-to-tensorboard \ + --log-batch-size-to-tensorboard \ + --log-validation-ppl-to-tensorboard \ + " +# TODO: Add --codecarbon-dir $CODECARBON_PATH \ if you want to use codecarbon, not adding it for now to make the current +# series of experiments consistent, especially speed-wise. 
Adding it once Tr6 and Tr7 are done
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    $(pwd)/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+    $OUTPUT_ARGS \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --data-path $DATA_PATH \
+    --data-impl mmap \
+    --split 949,50,1 \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+echo $CMD
+
+# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# Uncomment if you use codecarbon +# mkdir -p $CODECARBON_PATH + +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr5-1B3-multilingual/oscar/tr5a-1B3-multilingual-mt5tok.slurm b/train/tr5-1B3-multilingual/oscar/tr5a-1B3-multilingual-mt5tok.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a1e7bd34922b341c776bdce94e35a9ac46807d33 --- /dev/null +++ b/train/tr5-1B3-multilingual/oscar/tr5a-1B3-multilingual-mt5tok.slurm @@ -0,0 +1,181 @@ +#!/bin/bash +#SBATCH --job-name=tr5a-1B3-multilingual-mt5tok.slurm +#SBATCH --partition=gpu_p13 +#SBATCH --nodes=16 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --error=%x-%j.out # error file name (same to watch just one file) +#SBATCH --account=six@v100 + +set -x -e + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/synched_exps/tr5a-1B3-multilingual-mt5tok +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr5a-1B3-multilingual-mt5tok-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs +MEGATRON_DEEPSPEED_REPO=$ALL_CCFRWORK/code/Megatron-DeepSpeed + +TRAIN_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/train_data_string.0.3.txt` +VALID_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/valid_data_string.0.3.txt` +TEST_DATA_PATH=`cat $DATA_OUTPUT_PATH/sampling_probs/test_data_string.0.3.txt` + +# defining the right environment variables +source $six_ALL_CCFRWORK/start-prod +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export 
HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics +export HF_DATASETS_OFFLINE=1 +export TRANSFORMERS_OFFLINE=1 +cd $MEGATRON_DEEPSPEED_REPO + +# testing for potential faulty nodes +srun --jobid $SLURM_JOBID bash -c 'python -c "import torch, socket; print(socket.gethostname(), torch.cuda.is_available())"' + +# so processes know who to talk to +MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1` +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=16 +PP_SIZE=8 # NLAYERS must be a multiple of PP_SIZE here +TP_SIZE=1 # always fixed to the size of a single node +DP_SIZE=$((NNODES*GPUS_PER_NODE/(PP_SIZE*TP_SIZE))) # will get derived automatically by trainer + +MICRO_BATCH_SIZE=1 +GLOBAL_BATCH_SIZE=512 +TRAIN_ITER=146_484_375 + +NLAYERS=24 +NHIDDEN=2048 +NHEADS=16 +FFN_HIDDEN_SIZE=8192 +SEQ_LEN=2048 + +SAVE_INTERVAL=1500 + +OPTIMIZER_ARGS=" \ + --optimizer adam \ + --adam-beta1 0.9 \ + --adam-beta2 0.999 \ + --adam-eps 1e-8 \ + --lr 2e-4 \ + --min-lr 1e-5 \ + --lr-decay-style cosine \ + --lr-decay-samples 146_484_375 \ + --lr-warmup-samples 183_105 \ + --clip-grad 1.0 \ + --weight-decay 1e-1 \ + " + +EXIT_OPTS=" \ + --exit-duration-in-mins 1190 \ + " + +GPT_ARGS=" \ + --num-layers $NLAYERS \ + --hidden-size $NHIDDEN \ + --num-attention-heads $NHEADS \ + --ffn-hidden-size $FFN_HIDDEN_SIZE \ + --seq-length $SEQ_LEN \ + --max-position-embeddings $SEQ_LEN \ + --micro-batch-size $MICRO_BATCH_SIZE \ + --global-batch-size $GLOBAL_BATCH_SIZE \ + --rampup-batch-size 32 32 2_000_000 \ + --train-samples $TRAIN_ITER \ + --tokenizer-type PretrainedFromHF \ + --tokenizer-name-or-path google/mt5-base \ + --loss-scale 12 \ + --clip-grad 1.0 \ + --fp16 \ + --checkpoint-activations \ + $OPTIMIZER_ARGS \ + $EXIT_OPTS \ + " + +OUTPUT_ARGS=" \ + --log-interval 200 \ + --save-interval $SAVE_INTERVAL \ + --eval-interval 10000 \ + --eval-iters 100 \ + --tensorboard-dir $TENSORBOARD_PATH \ 
+    --tensorboard-queue-size 5 \
+    --log-timers-to-tensorboard \
+    --log-batch-size-to-tensorboard \
+    --log-validation-ppl-to-tensorboard \
+    "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+    "loss_scale_window": 500,
+    "hysteresis": 2,
+    "min_loss_scale": 1,
+    "initial_scale_power": 12
+  },
+  "steps_per_print": 2000,
+  "wall_clock_breakdown": false
+}
+EOT
+
+
+DEEPSPEED_ARGS=" \
+    --deepspeed \
+    --deepspeed_config ${config_json} \
+    --zero-stage ${ZERO_STAGE} \
+    --deepspeed-activation-checkpointing \
+    "
+
+export LAUNCHER="python -u -m torch.distributed.launch \
+    --nproc_per_node $GPUS_PER_NODE \
+    --nnodes $NNODES \
+    --master_addr $MASTER_ADDR \
+    --master_port $MASTER_PORT \
+    "
+
+export CMD=" \
+    `pwd`/pretrain_gpt.py \
+    --tensor-model-parallel-size $TP_SIZE \
+    --pipeline-model-parallel-size $PP_SIZE \
+    $GPT_ARGS \
+    $OUTPUT_ARGS \
+    --save $CHECKPOINT_PATH \
+    --load $CHECKPOINT_PATH \
+    --train-weighted-split-paths $TRAIN_DATA_PATH \
+    --valid-weighted-split-paths $VALID_DATA_PATH \
+    --test-weighted-split-paths $TEST_DATA_PATH \
+    --data-impl mmap \
+    --distributed-backend nccl \
+    $DEEPSPEED_ARGS \
+    "
+
+
+# # clear old checkpoint as it'd mismatch while we sort things out
+# rm -rf $SAVE_CHECKPOINT_PATH
+
+
+echo $CMD
+
+# We create the folder where the logs and codecarbon will be stored. 
+mkdir -p $LOGS_PATH +# to debug - add echo (it exits and prints what it would have launched) +srun --jobid $SLURM_JOBID bash -c "$LAUNCHER --node_rank \$SLURM_PROCID $CMD" 2>&1 | tee -a $LOGS_PATH/main_log.txt diff --git a/train/tr8-104B-wide/chronicles.md b/train/tr8-104B-wide/chronicles.md new file mode 100644 index 0000000000000000000000000000000000000000..0aa3bc7e5b8945d0a0f31bb1debc5ce3bce2e065 --- /dev/null +++ b/train/tr8-104B-wide/chronicles.md @@ -0,0 +1,979 @@ +# tr8-104B Chronicles + +Notes on the training progress with a particular focus on any encountered problems and their diagnosis and solutions/prevention. + +The timeline is from the top-down, more recent events are documented last. + +To follow the training progress charts, see: [tensorboard](https://huggingface.co/bigscience/tr8-104B-logs/tensorboard) + +To follow the raw training logs see: [logs](https://huggingface.co/bigscience/tr8-104B-logs/tree/main/logs) + +The currently used SLURM script is at [tr8-104B.slurm](tr8-104B.slurm). + +## Experiment 1 + +- Nodes: `64` +- Seed: `42` +- Started from iteration 0 + +![tr8-104B-glitch-1.png](images/tr8-104B-glitch-1.png) + +Somewhere between iteration 7000 and 7010 lm loss jumped from 6.4 to 14 and then 200 iterations later it went down to ~7 and stayed there w/o any change, and later it went into NaN. 
+ +``` + iteration 7000/ 159576 | consumed samples: 260912 | elapsed time per iteration (ms): 18706.1 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 6.444662E+00 | loss scale: 2048.0 | grad norm: 98258.265 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +------------------------------------------------------------------------------------------------ + validation loss at iteration 7000 | lm loss value: 7.174200E+00 | lm loss PPL: 1.305315E+03 | +------------------------------------------------------------------------------------------------ + iteration 7010/ 159576 | consumed samples: 261872 | elapsed time per iteration (ms): 19904.0 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 1.142026E+01 | loss scale: 2048.0 | grad norm: 219645.978 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + [...] + iteration 7220/ 159576 | consumed samples: 282032 | elapsed time per iteration (ms): 18333.4 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 7.155109E+00 | loss scale: 2048.0 | grad norm: 16921.991 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + +``` + + +Training more hasn't helped at all. + +Solution: +- roll back to the last good checkpoint `global_step6210` +- change seed. New seed `43`. + +Rollback: +1. checkpoints: +``` +cd /gpfsscratch/rech/six/commun/checkpoints/tr8-104B/checkpoints +``` + +moved all checkpoints after `global_step6210` to another dir + +fixed the `latest*` files to point to the checkpoint of our choice: + +``` +cat latest +perl -pi -e 's|\d+|6210|' latest* +``` +check it's correct: +``` +cat latest +cat latest_checkpointed_iteration.txt +``` + + +2. 
couldn't leave tensorboard files from the unrolled section as is, so fixed tensorboard by first copying all the existing events log files to a new dir
+```
+cd /gpfsscratch/rech/six/commun/checkpoints/tr8-104B/tr8-104B-logs/tensorboard
+mkdir tb-7k-glitch
+cp events* tb-7k-glitch
+git add tb-7k-glitch
+git commit -m "saved the original tensorboard logs" tb-7k-glitch
+git push
+```
+now checking the timestamp of the last checkpoint `global_step6210` we are rolling from and now manually removing all event log files from the main log whose timestamp is newer than the checkpoint `global_step6210`
+
+now we have 2 tensorboards - the main running one and the one which we couldn't recover from - but we want it for posterity
+
+Having a new seed forced regeneration of `.npy` files which re-randomized the order. If the glitch were due to faulty data this should have fixed the problem.
+
+Started a new training from the last good checkpoint and it ran until we ran into a similar glitch even sooner. 
+ +## Experiment 2 + + +- Nodes: `64` +- Seed: `43` (from the beginning) +- Restarted from `global_step6210` + +![tr8-104B-glitch-1.png](images/tr8-104B-glitch-1.png) + + +Similar to glitch 1, but even sooner we went from 6.3 to 9 to 6.7 + + +``` + iteration 6900/ 159576 | consumed samples: 251312 | elapsed time per iteration (ms): 18495.6 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 6.365808E+00 | loss scale: 4096.0 | grad norm: 95313.572 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6910/ 159576 | consumed samples: 252272 | elapsed time per iteration (ms): 18802.1 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 6.598378E+00 | loss scale: 4096.0 | grad norm: 84678.880 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6920/ 159576 | consumed samples: 253232 | elapsed time per iteration (ms): 18641.0 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 7.314456E+00 | loss scale: 4096.0 | grad norm: 122716.232 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6930/ 159576 | consumed samples: 254192 | elapsed time per iteration (ms): 18564.1 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 9.121927E+00 | loss scale: 4096.0 | grad norm: 283384.130 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | + iteration 6940/ 159576 | consumed samples: 255152 | elapsed time per iteration (ms): 18549.7 | learning rate: 6.000E-05 | global batch size: 96 | lm loss: 1.023865E+01 | loss scale: 4096.0 | grad norm: 42359.376 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +``` + +Conglong Li made an interesting observation that in both cases the glitch happened very closely to the moment where LR warmup stopped, to quote: + +> The gradients are the largest at the end of the LR ramp-up phase, so that's when there the training is the 
most unstable. There is no easy fix. Curriculum learning helps, and so does potentially lengthening warm-up/reducing the LR. +> +> We logged the L1 norm/max element of Adam optimizer's gradient variance, and found that 1) the norm and max element has a correlation with the LR schedule: they all reach max at/near LR peak 2) that is also where we have the highest risk of divergence. + +Moreover, we reviewed the tr1-13B training and we had a huge glitch there from which it recovered, perhaps since the model was much smaller. + +![tr1-13B-glitch-1-2.png](images/tr1-13B-glitch-1-2.png) + +There the LR rampup stopped around 25k, and the first huge glitch occurred at around 29k iteration +https://huggingface.co/bigscience/tr1-13B-tensorboard/tensorboard +According to Conglong Li 25k and 29k are close enough based on their study. Quoting Conglong: + +> In our study of [1.5B gpt-2](https://arxiv.org/pdf/2108.06084.pdf), we used 3K LR warmup and here you can see the grad variance norm (left) only reach bottom at 8K+ steps, and baseline's grad var max is unstable during first 10K+ steps: + +![step-wise-adam-variance](images/step-wise-adam-variance-1.png) + +So it looks that now we have 3 documented glitches that all are related to the end of the LR warm up end. + +We are not yet excluding the case that something is wrong with the data. Going to look into it next. + +After some more iterations this training went belly up in the same way via a second smaller glitch followed by loss going to NaN. Same as the first one just at a slightly different iteration. + +Stopped this training. + +So rolling back tensorboard and checkpoints again as described in the previous section + +Also until we figure out the stability going to switch to a much more frequent checkpoint saving so it's much faster continue from the last good iteration. Let's try 300. 
+``` +SAVE_INTERVAL=300 +``` + + +## Experiment 3 + +Corby Rosset suggested we try a more numerically stable self-attention version, which was implemented [here](https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/118) + +Quoting: + +Re: 104B instability (https://huggingface.slack.com/archives/C01NHER1JLS/p1632801340055000) One thing I've encountered before is how the self-attention is computed. E.g. this [line](https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/c839a8aa30731f71b3738d56009be9668508e366/megatron/model/transformer.py#L303) shows that the norm_factor may be multiplied after the Query * Key matrix multiplication. If the dim of Q and K are very large, the output may blow up and the norm_factor won't be able to save it. + +Proposal: move the norm_factor inward, so Q and K are scaled down before matrix multiply: +``` +matmul_result = torch.baddbmm( + matmul_result, + 1.0/math.sqrt(self.norm_factor) * query_layer.transpose(0, 1), # [b * np, sq, hn] + 1.0/math.sqrt(self.norm_factor) * key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0 if alibi is None else 1.0, alpha=1.0) + +# change view to [b, np, sq, sk] +attention_scores = matmul_result.view(*output_size) +``` + +To make the operation mathematically equivalent, moving the norm factor inward requires taking sqrt again +if n is a scalar, A and B matrices: +``` +n * (A dot B) === (sqrt(n) * A) dot (sqrt(n) * B) +``` + +Also thanks to @RezaYazdaniAminabadi who helped to find where this function is defined in CuBlas https://docs.nvidia.com/cuda/cublas/index.html#cublas-GemmStridedBatchedEx and which includes the definition: + +``` +C+i*strideC=αop(A+i*strideA)op(B+i*strideB)+β(C+i*strideC), for i ∈[0,batchCount−1] +``` +the issue is alpha is multiplied after the matrix-matrix mul is done so it can cause instability + +- Repeated the same rollback to iteration 6210 +- kept seed 43 (so the same data as experiment 2) + +Also codecarbon has been ignoring the 
`log_level="error"` setting and bombarding the main log with thousands of warnings, which also indicate that it misses a lot of measurements, so turning it off as the measurements will be wrong anyway. + +Experiment 3 failed in a very similar way as experiment 2 + +![tr8-104B-glitch-3.png](images/tr8-104B-glitch-3.png) + + + +## Experiment 4 + +Iz Beltagy compiled a list of suggestions to try next. Quoting Iz: + +A few thoughts and suggestions + +The previous conversations are discussing two hypotheses for spikes and divergence. + +* bad data: one easy way to test this hypothesis is to shuffle the data. Given that we tried this and it didn’t change much, I am less inclined to believe it is data. I also like to believe that these models are more robust to bad data than we are giving them credit. Maybe the model won’t learn anything useful, but at least it shouldn’t diverge +* it is fp16: this is possible and an easy way to test this hypothesis is to run everything on fp32. If it works, then we know it is fp16, so we go back and try to find the part of the network that is upsetting fp16 whether it is softmax, faulty implementation of selfattention, loss scale .. etc. But if we use fp32 and the model still diverges, then we don’t need to spend time exploring these tricks because we know they are not going to fix the problem + +Let me add a few more hypotheses to the mix + +* We are using beta2=0.999, while gpt3 used 0.95. My understanding is 0.95 is more stable but slower to train. I would try this as well given how easily and quickly the model is diverging + +* Bad model design: prior work showed that the ratio between width and depth is important. That certainly matters for the loss but I don’t know how much it matters for the model stability. However, it is worth noting that this model is much much worse than the 13B model. After 6K steps it reaches loss=6.3 and the loss slowed down while the 13B model reached loss=4.5 and the loss is still sharply decreasing. 
This could be related; the model is not learning anything useful and thus easily diverging. + +* Regarding the restarts, looking at the loss-scale curve, looks like the model was heading toward divergence long before 6Ksteps, maybe around 4.7Ksteps. If you restart, I would suggest going back to a checkpoint before 4.7K steps. + +* Curriculum learning: it is a great idea, we tried it in a different project (but smaller models) and it works, but the model should train reasonably well without it. Using it now might mask a bigger underlying problem that we need to fix first + +So to summarize, here are things to try +* if you still feel data could be the reason, try shuffling again but restart from step before 4.7K +* if it doesn’t work, disable fp16 and run everything in fp32 restarting from before step 4.7K +* if it doesn’t work, rerun from scratch with beta2=0.95 (I don’t have a good intuition if the model can recover with changing beta2 mid training, it might, I don’t know) +* if it doesn’t work, change model design to one with a reasonable ratio between depth/width + +Samyam Rajbhandari seconded Iz's proposal: + +I second @Iz Beltagy’s suggestion to use beta2=0.95. We have seen it stabilize very similar spiky loss in several cases. My feeling is that it might be ok even without a full restart. But restarting early would be helpful. Depending on how long it takes, I feel like 6K, 4K and 3K would be good candidates to restart in an increasing order of preference. 
+ +So stopped the last experiment and started a new one - identical to Experiment 3, but: + +- rolled back to iteration 3k - so earlier than 6210, as suggested that it might be too close to the blow up and we should try to make things better before that +- `--adam-beta2 0.95` - it should make the training slower but more stable + +Mohammad Shoeybi notes: + +* With respect to reproducibility, we have done a lot of work to make sure Megatron is reproducible, meaning that if you resume from an earlier checkpoint and run on the same number of GPUs, you should see EXACTLY the same behaviour. This implies that dataloaders are also reproducible. +* The spikes sometimes happen during the training and if the loss quickly recovers, it is generally ok. Sometimes it might be due to a set of bad samples but most of the time it is due to optimizers being in a bad state and having values that might underflow in the gradients. What we found that was helpful is to use a lower beta2 in the adam optimizer. Basically the closer beta2 is to beta1, the less chances of these spikes happening. Definitely we don’t want to use a very low value for beta2 (for example beta2=beta1=0.9) as it will slow down the convergence. +* Large learning rate can cause instabilities in the fp16 training (fp16 training is more sensitive to learning rate). I don’t have a solid explanation for this but we found this empirically. +* We also found that the larger the model, the lower the initialization std should be. A rule of thumb is to scale it down to sqrt of hidden size. This also helps with the stability. +* With respect to getting input text from iteration, we report the number of samples consumed, You can instantiate dataset (for example here) and get the sample number directly + +Rollback performed: + +1. 
checkpoints: +``` +cd /gpfsscratch/rech/six/commun/checkpoints/tr8-104B/checkpoints +``` + +moved all checkpoints after `global_step3000` to another dir + +fixed the `latest*` files to point to the checkpoint of our choice: + +``` +cat latest +perl -pi -e 's|\d+|3000|' latest* +``` +check it's correct: +``` +cat latest +cat latest_checkpointed_iteration.txt +``` + +2. tensorboard, similar to the previous tries. + + +![tr8-104B-glitch-4.png](images/tr8-104B-glitch-4.png) + +It went in the wrong direction around iteration 5k - stopped this experiment. + + +## Experiment 5 + +Same as Experiment 4, but rolling back to iteration 0. + + +![tr8-104B-glitch-5.png](images/tr8-104B-glitch-5.png) + +It trained faster but still went in the wrong direction around iteration 5k - stopped this experiment. + + +## Experiment 6 + +Discovered a really bad mistake in the setup that impacted all the previous experiments that failed. + When changing from 13B to 104B I did not update `FFN_HIDDEN_SIZE` and it remained `20480` when it should have become `65536`, so it has been a very lopsided 58B model instead of 104B all along. + +Lesson learned: I'm going to change the slurm script to not explicitly set `FFN_HIDDEN_SIZE` and let Megatron automatically set `FFN_HIDDEN_SIZE = 4 * HIDDEN_SIZE` to avoid similar errors in the future. + +Additionally, because `FFN_HIDDEN_SIZE` was so small, the fixed setup required re-tune up of the training setup - as a lot more nodes are now required. + +So let's look at the math: + +``` +## Let h = hidden size, n = num_layers, k = num_heads, s = sequence length, v = vocabulary size +total_params = n * (12h^2 + 13h) + (v * h) + (s * h) + 2*h +``` + +the correct 104B is: +- 0.8x times layers=32 than 13B (40) +- 3.2x times NHIDDEN=16384 than 13B (5120) + +While the 104B model is 8x times bigger than 13B param-wise, the model grows quadratically with NHIDDEN size, so each layer will require ~10x (3.2**2) more gpu memory plus more memory per activations. 
We double TP from 2 to 4 as 4 is a max we can use on a 4-gpu node. So we have to 5x the PP then, so we need at least PP=20, and to work with NLAYERS=32, it takes us to PP=32. + +So the needed config is: +``` +TP_SIZE=4 +PP_SIZE=32 +``` + +so 13B took 8 gpus for a single replica, and 104B needs 128 gpus (16x times more) + +I also switcheed to `--rampup-batch-size 32 32 6_000_000`, because of PP=32 (but that wasn't needed since it's the number of replicas that matters. there needs to be enough batch size to go over replicas, so `BS>n_replicas` and `BS` has to be divisible by `n_replicas`. Corrected this in the following experiment.) + +I'm going to repeat Experiment 5 with : + +- fixed `FFN_HIDDEN_SIZE` (so that it's `4 * HIDDEN_SIZE`) +- `--rampup-batch-size 32 32 6_000_000` + +And the outcome is very similar to Exp 4 and 5 despite the corrected model shape: + +![tr8-104B-glitch-6.png](images/tr8-104B-glitch-6.png) + + + +## Future Experiments + +1. if it doesn’t work, change model design to one with a reasonable ratio between depth/width - currently it's 512. Try 256? (going for the opposite of very deep and shallow won't be learning much) + +2. get back to beta2=0.999 (with spikes) and try fp32. If Corby’s self-attn version made a difference, then maybe it is an fp16 instability issue. + note: This will require approximately 30% more RAM for activation memory - so to run this experiment we have to first test that we can fit it. +``` +perl -pi -e 's|--adam-beta2 0.95|--adam-beta2 0.999|' *slurm +``` + +3. try lower learning rate (half the LR and longer warmup) - ideally wait for CongLong + - try lower target learning rate 2.5e-5 - longer ramp up - past 10k iterations (Samyam) + - max lr the same - twice as long ramp up, half as long, (Stella) + - Curriculum Learning should fix all these lr-related problem (waiting for CL implementation) + +4. reduce SEQLEN 1024K? 512? + +5. Switch to LAMB? But we haven't discussed that in details. 
+ + +## Experiment 7 + +Same as Exp 6 with the following changes: + +1. Trying width/depth ration of 180 instead of 512. Which puts it into a normal range and it's no longer an unusually wide model (in the megatron's paper the ratio grows from 150 to 200 as the model grows). Also raising heads to 80 to again be in the ballpark of normal ratio. + +``` +NLAYERS=64 +NHIDDEN=11600 +NHEADS=80 +``` + + +``` +perl -pi -e 's|NHIDDEN=16384|NHIDDEN=11600|' *slurm +perl -pi -e 's|NLAYERS=32|NLAYERS=64|' *slurm +perl -pi -e 's|NHEADS=32|NHEADS=80|' *slurm +``` + +Note: this should use less gpu memory too, but if it's another short experiment there is no need to re-tune the training setup. + +We don't know whether it was a good idea to change `NHEADS` - it doesn't change the model size, and there is [research](https://blog.ml.cmu.edu/2020/03/20/are-sixteen-heads-really-better-than-one/) that shows that nheads doesn't quite matter. We could have kept `NHEADS=32` and used `NHIDDEN=11616` to keep the model of the same size (tiny change). The two have to respect`NHIDDEN % NHEADS = 0` rule. + +2. reverted back to `--rampup-batch-size 16 16 6_000_000`. We were fine with BS increments of 16 since we can't fit too many replicas anyway. Since currently each replica is 32 nodes, with 64 or 128 nodes we will be using only 2 or 4 replicas, therefore no problem to run BS=16 increments. 
+ +This one failed too: + +![tr8-104B-glitch-7.png](images/tr8-104B-glitch-7.png) + +## Experiment 8 + +Half lr, and 1/3 longer warm up: + +``` + --lr 3e-5 \ + --lr-warmup-samples 300_000 \ +``` + +The failure is very similar to Exp 7 + +![tr8-104B-glitch-8.png](images/tr8-104B-glitch-8.png) + + +## Experiment 9 + + +Trying much longer warmup: + +``` + --lr 3e-5 \ + --lr-warmup-samples 1_000_000 \ +``` + +Very similar failure to Exp 8 + +![tr8-104B-glitch-9.png](images/tr8-104B-glitch-9.png) + + + +## Experiment 10 + +``` +perl -pi -e 's|--lr 3e-5|--lr 1e-5|' *slurm +``` + +Same as Exp 9 but with `lr=1e-5`, + +Initially the plan was to restart Exp 9 from step 6900, but Meg didn't accept the change and crashed with: +``` +AnnealingLR: class input value 1e-05 and checkpointvalue 3e-05 for learning rate do not match +``` + +So had to start from scratch. + +This will be the last experiment that plays with a different max LR value. + +The outcome is very similar to the previous 3 experiments around LR modifications. + +![tr8-104B-glitch-10.png](images/tr8-104B-glitch-10.png) + + + +## Experiments 7-10 Summary + + +Here is the summary of the 4 experiments (7-10) around LR tweaks: + +| Exp | lr | lr-warmup | +| --: | ---: | --------: | +| 7 | 6e-5 | 0.26M | +| 8 | 3e-5 | 0.3M | +| 9 | 3e-5 | 1M | +| 10 | 1e-5 | 1M | + +All 4 had a very similar behavior just the learning stopped and divergence started at progressively later stage. + +![tr8-104B-glitch-7-10.png](images/tr8-104B-glitch-7-10.png) + + +## Future Experiments: Set 2 + +Actionable proposals: + +1. restart Exp 9 from 7k with `lr=1e-5` +2. investigate weight initialization + 530B uses `--init-method-std 0.004` - probably will try that (need to calculate from NHIDDEN) +3. double check that gradient clipping is working +4. try shorter seqlen (`seqlen=512`, 4x the batch size, everything else similar to exp 8) +5. if promising, try curriculum learning to reach the largest seqlen +6. 
try smaller models instead of the 8x jump from 13B +7. train in full fp32: remove `--fp16` +8. train parts in fp32: add `--attention-softmax-in-fp32` + +Exp 8 has given us the best lm loss, so we are using it as the new baseline for other experiments. + +fp16 is less likely to be an issue, because usually it becomes one after you train for hundreds of thousands of gradient updates where the model weights grow and become large that they overflow the fp16 (example of T5 models that were pre-trained on bf16 and they weren't aware that the weights were huge). + + + +Proposals that needs work: + +3. CL (not ready yet, breaks) + +Proposals that need research and/or code: + +* study weight init + - in particular word embedding matrix init (@thom) + - weight initialization for the Transformer blocks that T5 used (@Sidd Karamcheti) + - T5's weight initialization is coupled to the optimizer used for T5 (Adafactor). Adafactor's initialization and optimization tricks are very good for stability. They're all in the adafactor paper (and in the mesh tf adafactor implementation) (@Colin Raffel) + +* ScaleNorm paper which discussed different ways they stabilized and sped up training including an analysis of weight init on stability. https://arxiv.org/abs/1910.05895 (@Huu Nguyen) + + +Deepspeed side: + +Shaden: +> We might try to prescale gradients instead of postscaling them during the data-parallel averaging. What DP dimension are we using in these experiments? I think there is a small amount of code needed for the zero codepath. +Jeff: +> I think we are already prescaling gradients in this zero code path. I am also curious what DP size is. We have sometimes seen for really large DP sizes sometimes prescaling by world size is too much and essentially zeros out the grads. In that case we could do a combo of pre/post scaling via a pre-devide factor. + +Additional suggestions from twitter: + +François REMY: +> Have you tried a larger regularization? +> Weight decay? 
+ + +## Renaming event names in the old tensorboard to match the new ones + +https://github.com/bigscience-workshop/Megatron-DeepSpeed/pull/39/files renamed the events to group them, so our frozen training branch `tr1-13B` has been now creating TB charts that are incompatible with the one generated by `main`. So let's bring the old to the new, so that we could switch to the newest code while still being able to easily overlay different training graphs. + +First let's see what needs to be remapped: + +I grabbed one new events file + +``` +wget https://huggingface.co/bigscience/tr3m-1B3-pile-tensorboard/resolve/main/events.out.tfevents.1631869980.r13i7n7.78623.0 +``` + +I used the PR above to see what has changed. + +And now I can lookup the event names using pattern lookup, e.g. to find any matches for `grad-norm`: +``` +$ perl -lne 'm|([\w\-_ ]+/[\w\-_ ]+)| && print $1' new/events.out.tfevents.1631869980.r13i7n7.78623.0 | grep -v text_summ | sort | uniq | grep grad-norm +grad-norm/grad-norm +grad-norm/grad-norm vs samples +``` +and so I made the following map: + +``` +"iteration-time" "iteration-time/iteration-time" +"iteration-time vs samples" "iteration-time/iteration-time vs samples" +"learning-rate" "learning-rate/learning-rate" +"learning-rate vs samples" "learning-rate/learning-rate vs samples" +"batch-size" "batch-size/batch-size" +"batch-size vs samples" "batch-size/batch-size vs samples" +"lm loss vs gigaflos" "lm-loss-training/lm loss vs gigaflos" +"lm loss" "lm-loss-training/lm loss" +"lm loss vs samples" "lm-loss-training/lm loss vs samples" +"loss-scale" "loss-scale/loss-scale" +"loss-scale vs samples" "loss-scale/loss-scale vs samples" +"grad-norm" "grad-norm/grad-norm" +"grad-norm vs samples" "grad-norm/grad-norm vs samples" +"num-zeros" "num-zeros/num-zeros" +"num-zeros vs samples" "num-zeros/num-zeros vs samples" +"params-norm" "params-norm/params-norm" +"params-norm vs samples" "params-norm/params-norm vs samples" +"lm loss validation vs samples" 
"lm-loss-validation/lm loss validation vs samples" +"lm loss validation ppl vs samples" "lm-loss-validation/lm loss validation ppl vs samples" +"lm loss validation vs gigaflos" "lm-loss-validation/lm loss validation vs gigaflos" +"lm loss validation ppl vs gigaflos" "lm-loss-validation/lm loss validation ppl vs gigaflos" +"lm loss validation" "lm-loss-validation/lm loss validation" +"lm loss validation ppl" "lm-loss-validation/lm loss validation ppl" +``` + +And now we just need to feed these to the script someone conveniently published on SO: +https://stackoverflow.com/a/60080531/9201239 +I modified it to work on the files directly which is much easier to feed to `find` and process multiple sub-dirs. +[tb-rename-events.py](./tb-rename-events.py) + +If `$six_ALL_CCFRWORK/bin` is in your `$PATH` the script is already there. + +``` +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "iteration-time" "iteration-time/iteration-time" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "iteration-time vs samples" "iteration-time/iteration-time vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "learning-rate" "learning-rate/learning-rate" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "learning-rate vs samples" "learning-rate/learning-rate vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "batch-size" "batch-size/batch-size" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "batch-size vs samples" "batch-size/batch-size vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss vs gigaflos" "lm-loss-training/lm loss vs gigaflos" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss" "lm-loss-training/lm loss" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss vs samples" "lm-loss-training/lm loss vs samples" \; +find tensorboard -name "*.tfevents*" 
-exec tb-rename-events.py {} "loss-scale" "loss-scale/loss-scale" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "loss-scale vs samples" "loss-scale/loss-scale vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "grad-norm" "grad-norm/grad-norm" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "grad-norm vs samples" "grad-norm/grad-norm vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "num-zeros" "num-zeros/num-zeros" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "num-zeros vs samples" "num-zeros/num-zeros vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "params-norm" "params-norm/params-norm" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "params-norm vs samples" "params-norm/params-norm vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation vs samples" "lm-loss-validation/valid/lm loss validation vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation ppl vs samples" "lm-loss-validation/valid/lm loss validation ppl vs samples" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation vs gigaflos" "lm-loss-validation/valid/lm loss validation vs gigaflos" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation ppl vs gigaflos" "lm-loss-validation/valid/lm loss validation ppl vs gigaflos" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation" "lm-loss-validation/valid/lm loss validation" \; +find tensorboard -name "*.tfevents*" -exec tb-rename-events.py {} "lm loss validation ppl" "lm-loss-validation/valid/lm loss validation ppl" \; +``` +It surely can be made more efficient by rewriting each file only once, but that's good enough for an occasional use. 
Important: This script wants CUDA or it'll fail! So run it on the `prepost` partition. It also needs a recent `tensorboard`, so use `start-prod`.
+ + +``` + --lr 3e-5 \ + --lr-warmup-samples 300_000 \ +``` + +This experiment tests the `--init-method-std` - until now we were using the default setting of `0.02`. + +`--init-method-std 0.006` + +We derived this from: + +`0.00587220219514703 = sqrt(2/(11600*5))` (from the ScaleNorm paper https://arxiv.org/abs/1910.05895) + +inject a new setting: +``` +perl -pi -e 's|--loss-scale 12|--loss-scale 12 \\\n --init-method-std 0.006|msg' *slurm +``` + +revert parts to Exp 8: +``` +perl -pi -e 's|--lr 1e-5|--lr 3e-5|' *slurm +perl -pi -e 's|--lr-warmup-samples 1_000_000|--lr-warmup-samples 300_000|' *slurm +``` + +So far it looks like a breakthrough and we are training well and have already gone through 3 spikes from which it quickly recovered! + +![tr8-104B-exp-11.png](images/tr8-104B-exp-11.png) + + +### Analyzing Spike + +At iteration 11447 we registered a huge spike: +``` + iteration 11446/ 159576 | consumed samples: 1255856 | elapsed time per iteration (ms): 80367.0 | learning rate: 3.000E-05 | global batch size: 432 | lm loss: 3.062589E+00 | loss scale: 65536.0 | grad norm: 12952.562 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 11447/ 159576 | consumed samples: 1256288 | elapsed time per iteration (ms): 78599.8 | learning rate: 3.000E-05 | global batch size: 432 | lm loss: 3.114286E+00 | loss scale: 65536.0 | grad norm: 262623.331 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 11448/ 159576 | consumed samples: 1256720 | elapsed time per iteration (ms): 76051.2 | learning rate: 3.000E-05 | global batch size: 432 | lm loss: 7.712340E+00 | loss scale: 65536.0 | grad norm: 254699.302 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) +``` +which is now taking a long time to come down from. 
+ +To retrieve data around that spike (3 before, 1 after): + +``` +source $six_ALL_CCFRWORK/code/tr8-104B/bigscience/train/tr8-104B-wide/start-tr8-104B + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8-104B/Megatron-DeepSpeed-tr8-104B + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +SEQ_LEN=2048 +python tools/sample_idxs_to_text.py \ + --print-text \ + --sample-id-range 1255424 1257152 \ + --seed 43 \ + --train-samples 300_000_000 \ + --seq-length $SEQ_LEN \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --output-file exp11-spike-data-300000000ns_2048sl_43s-1255424-1257152.txt +``` +and to run: +``` +bash extract.sh +``` + + + +## Adding support for an 8-bit optimizer: bitsandbytes + +Seungju Han integrated the 8-bit optimizer into Meg-DS: + +* paper https://arxiv.org/abs/2110.02861 +* library https://github.com/facebookresearch/bitsandbytes +* video https://www.youtube.com/watch?v=IxrlHAJtqKE + +Not sure about the exact percentage of memory it should save - it takes us from 8 bytes to 2 bytes for Adam states (x params). As there are 2 state variables of 4 bytes each, and then each is reduced to 1 byte. So going from needing 18 bytes per param for optim/grad/weights (training) to 12 bytes. And the paper says it manages to do the optim work faster than fp32. + +To activate it just add `--use-bnb-optimizer` after manually installing `pip install bitsandbytes-cuda113` after editing the install line to use the correct cuda version for your system. + +Sam Shleifer adds on twitter: + +> I think it also has a regularizing effect/might need higher learning rate, but haven't investigated this thoroughly enough to include in the paper. 
+ + +## Expert wisdom + +Conglong Li: The first `LR warmup*3` steps is always unstable for large pretraining (w/ or w/o CL). + +> First, we observed that the l1 norm and max element of Adam's variance term are correlated with training stability. Second, we observed that the variance term only becomes stable after around LR warmup *3 steps. An example is in our paper https://arxiv.org/pdf/2108.06084.pdf figure 3e 3f, where the LR warmup is 3K step. Not in the paper, but we also measured the variance term for another model with different LR warmup, and the observation is consistent. + +## 2M backslash-only samples in our dataset + +Huu Nguyen discovered that there are huge records with just backslashes in OSCAR-en. So we set out to look at their occurence: + +``` +$ gunzip -c oscar-en-shuffled.jsonl.gz | fgrep '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' | tee data-with-many-slashes.txt +$ wc -l data-with-many-slashes.txt +6318 data-with-many-slashes.txt +``` + +So 6318 records with long sections of backslashes. Some of them are 1M character long! + +Let's look closely: + +``` +$ perl -lne 'm|(\\{10000,})| && print length $1' data-with-many-slashes.txt | wc -l +4245 +$ perl -lne 'm|(\\{10000,})| && print length $1' data-with-many-slashes.txt | grep 1048574 | wc -l +3835 +``` + +So, 4245 records with 10k+ of backslashes, and 3835 records with 1M backslashes. + +Remember that this then gets split up into `SEQLEN=2048` chunks, so each 1M-long record becomes 512 samples of 2048 tokens. Thus there are at least 2M samples in the Meg-LM pre-processed OSCAR-en dataset that are made of pure backslashes. + +My suspicion is that OSCAR downloaded a single webpage which was comprised of say 4B backslashes. 
It then happily sliced it into 1M-long records (which I deduce is its max doc length) and thus introduced thousands of records of just backslashes. + + +Checking that the original indeed contains these records: + +- Download the dataset +``` +python -c "from datasets import load_dataset; load_dataset('oscar', 'unshuffled_deduplicated_en', split='train', keep_in_memory=False, cache_dir='cache')" +``` + +- Check the original records: +``` +cd cache/downloads +find . -type f -size +50k | xargs -n1 gunzip -c | fgrep -a '\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' | tee data-with-many-slashes.txt +``` + +- Validate: + +``` +$ perl -lne 'm|(\\{10000,})| && print length $1' data-with-many-slashes.txt | wc -l +4245 +``` +so getting the same result as shuffled, so we indeed know the issue is with the original dataset. +``` +perl -lne 'm|(\\{10000,})| && print length $1' data-with-many-slashes.txt | sort -V +``` + +The largest number this time is `524287` - half the one from the shuffled dataset `1048574`. + +I first thought it was a bug somewhere, but this is just json encoding, so it escapes each backslash. That's why in the original it's 0.5M backslashes and in the processed jsonl files it's 1M backslashes. + +``` +$ echo 'this is \\\\ test' > in +$ python -c 'import json; f = open("in", "r"); t="".join([l for l in f]); print(t); print(json.dumps(t))' +this is \\\\ test +"this is \\\\\\\\ test\n" +``` + +So then we only have 1M backslash-only samples in our dataset and not 2M. + + +## Analysing the spikes with custom data + + +We are going to take the last checkpoint just before the spike (`global_step11100`) and try to feed it small custom-made subset datasets from the range of data that we know that caused the spike. 
+ +First we need to hack Megatron to allow us to do that: + +``` +diff --git a/megatron/checkpointing.py b/megatron/checkpointing.py +index f7328dc..7b5f0ed 100644 +--- a/megatron/checkpointing.py ++++ b/megatron/checkpointing.py +@@ -362,6 +362,9 @@ def load_checkpoint(model, optimizer, lr_scheduler, load_arg='load', strict=True + else: + print_rank_0('could not find arguments in the checkpoint ...') + ++ args.consumed_train_samples = 0 ++ args.consumed_valid_samples = 0 ++ iteration = 0 +``` + +Now for example we can extract just the backslash records (done earlier), make a new dataset: + + +We can also create one dataset with 0.5M backslashes each: +``` +perl -le 'BEGIN{$x="\\"x524286} for (1..4000) { print qq[{"id": $_, "text": "$x"}] }' > data-with-only-slashes.jsonl +``` +now to preprocess: +``` +input=data/data-with-only-slashes.jsonl +python tools/preprocess_data.py \ + --input $input \ + --output-prefix data/data-with-only-slashes \ + --dataset-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --merge-file data/gpt2-merges.txt \ + --vocab data/gpt2-vocab.json \ + --append-eod \ + --workers 4 + +``` + +Now we have to set the `latest` to the checkpoint of our desire `global_step11100`, so that it'll resume from it. + +Now we use the same slurm script, but we have to tweak it to manually set GBS at the time of this iteration which we can see from the log file (`384`), we should also disable the batch size rampup. Finally we set the `train-samples` to a much lower number - say `3000`, which should give us a few iterations of runway. + +So: +``` +-GLOBAL_BATCH_SIZE=2048 ++GLOBAL_BATCH_SIZE=384 +- --rampup-batch-size 16 16 6_000_000 \ +- --train-samples 300_000_000 \ ++ --train-samples 3000 \ +``` + +Finally, we just need to tweak the script to not overwrite our normal checkpoints when saving, so we will set it save elsewhere. Same goes for the logs and tensorboard files. 
+ +``` +PREFIX=data-with-only-slashes + +CHECKPOINT_LOAD_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8-104B-data-study/checkpoints + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8-104B-data-study/$PREFIX +CHECKPOINT_SAVE_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr8-104B-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +LOGS_PATH=$REPO_PATH/logs +mkdir -p $LOGS_PATH + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/${PREFIX}_text_document +[...] + + --save $CHECKPOINT_SAVE_PATH \ + --load $CHECKPOINT_LOAD_PATH \ +``` + + + +To peek inside a specific iteration we just do a dump of that sample range as before. Let's get the 3rd iteration here: + +``` +source $six_ALL_CCFRWORK/code/tr8-104B/bigscience/train/tr8-104B-wide/start-tr8-104B + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8-104B/Megatron-DeepSpeed-tr8-104B-data-study + +PREFIX=data-with-only-slashes + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$MEGATRON_DEEPSPEED_REPO/data/${PREFIX}_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +SEQ_LEN=2048 +python tools/sample_idxs_to_text.py \ + --print-text \ + --sample-id-range 768 1152\ + --seed 43 \ + --train-samples 3000 \ + --seq-length $SEQ_LEN \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --tokenizer-type GPT2BPETokenizer \ + --vocab-file $VOCAB_FILE \ + --merge-file $MERGE_FILE \ + --output-file ${PREFIX}-3000ns_2048sl_43s-768-1152.txt +``` + +XXX: run an experiment on the sample range where the spike has occurred, but alone. 1256288-1256720. + +the spike iteration consumed 1256720 samples, so we need to hack Meg to set the consumed samples to 1256288 +and set GBS=432 and run from there. We hope to reproduce the same spike but force it earlier. 
+ +``` + args.consumed_train_samples = 1256288 +``` +so probably need to add a new cl arg `--override-consumed-train-samples`. + + +### Additional sub-experiments to run on Exp 11 + + + +Exp 11: +- instrument Meg to skip data +- do an experiment with skipping some iterations and resume from there - see if the spike repeats +- do an experiment with the same data and see if it's reproducible +- would be interesting to find which batches are causing this; the ones at the spike or the ones before it + + + +## Experiment 12 + +Experiment 11 was a breakthrough, now we want to see if we can use a higher max LR, so that we could train faster. And also to be able to compare to the 13B scaling laws. + +Therefore for this experiment we are going back to the same settings as 13B for these 2 settings: +``` + --lr 6e-5 \ + --lr-warmup-samples 216_320 \ +``` +It will also make it easier to compare the 2 trainings. + + +``` +perl -pi -e 's|--lr 3e-5|--lr 6e-5|' *slurm +perl -pi -e 's|--lr-warmup-samples 300_000|--lr-warmup-samples 216_320|' *slurm +``` + +there was a similar spike to Exp 11 but some 2.5k iterations sooner: +``` +iteration 8738/ 159576 | consumed samples: 488048 | elapsed time per iteration (ms): 61947.6 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 3.317482E+00 | loss scale: 65536.0 | grad norm: 231817.368 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 8739/ 159576 | consumed samples: 488224 | elapsed time per iteration (ms): 59158.7 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 3.417051E+00 | loss scale: 65536.0 | grad norm: 89608.482 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 8740/ 159576 | consumed samples: 488400 | elapsed time per iteration (ms): 59665.7 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 3.682950E+00 | loss scale: 65536.0 | grad norm: 1490753.218 | num zeros: 0.0 | number of 
skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 8741/ 159576 | consumed samples: 488576 | elapsed time per iteration (ms): 60924.4 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 7.267764E+00 | loss scale: 65536.0 | grad norm: 3273877.676 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 8742/ 159576 | consumed samples: 488752 | elapsed time per iteration (ms): 61784.1 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 4.130868E+00 | loss scale: 65536.0 | grad norm: 1839649.510 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 8743/ 159576 | consumed samples: 488928 | elapsed time per iteration (ms): 61656.6 | learning rate: 6.000E-05 | global batch size: 176 | lm loss: 7.596509E+00 | loss scale: 65536.0 | grad norm: 1369067.435 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) +``` + + +and then training diverged: +``` + iteration 9026/ 159576 | consumed samples: 540464 | elapsed time per iteration (ms): 62075.5 | learning rate: 6.000E-05 | global batch size: 192 | lm loss: 7.024580E+00 | loss scale: 16384.0 | grad norm: 20609.780 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) + iteration 9027/ 159576 | consumed samples: 540656 | elapsed time per iteration (ms): 63274.6 | learning rate: 6.000E-05 | global batch size: 192 | loss scale: 8192.0 | grad norm: 20609.780 | num zeros: 0.0 | number of skipped iterations: 0 | number of nan iterations: 0 | +time (ms) +``` + +![tr8-104B-exp-12.png](images/tr8-104B-exp-12.png) + + + +## Things to try + + +1. Trying layernorm in Embedding.forward, since we saw a much more stable training with BNB which does that. + +but this can't be done once the training has started, since we don't have the weights for layer norm. + +2. 
residual fp32 + +The proposed `--fp32-residual-connection` can't be enabled once the training started + +3. Samyam: + +- linear layers in fp16 which is the where the most fp16 speed saving is happening. + +But it'd be very cheap to switch: + +- logits to softmax in fp32 +- attention softmax in fp32 + +The proposed `--attention-softmax-in-fp32` requires `--no-query-key-layer-scaling` + +The proposed `--accumulate-allreduce-grads-in-fp32` - waiting to be tested + + + +Continued in the 2nd set of experiments [chronicles b](../tr8b-104B-ml/chronicles.md) + + +stopped at Date: 2021-11-07 diff --git a/train/tr8-104B-wide/start-tr8-104B b/train/tr8-104B-wide/start-tr8-104B new file mode 100644 index 0000000000000000000000000000000000000000..7246352297b9abe06d9f6a4280315c2342728b3b --- /dev/null +++ b/train/tr8-104B-wide/start-tr8-104B @@ -0,0 +1,57 @@ +# This is a python production script for JZ / tr1-13B training +# +# Activate with: +# +# source ./start-tr1-13B +# +# + +# # if this session isn't run via a login shell, which is the case when running a +# # command which is not shell via ssh, the bash function `module` will be missing. +# # so work around it by emulating part of the login shell that loads modules environment +# if [ -z $(type -t module) ] +# then +# . 
/etc/profile.d/z_modules.sh +# fi +module purge +module load pytorch-gpu/py3/1.8.1 +module load nvtop git git-lfs github-cli mc + +# git prompt +export GIT_PROMPT_ONLY_IN_REPO=0; +export GIT_PROMPT_THEME="JZPRod" +source $six_ALL_CCFRWORK/envs/.bash-git-prompt/gitprompt.sh + +# We are using common disk spaces for datasets, caches, and experiment dumps: +# +#- Code, cache and datasets -> `$six_ALL_CCFRWORK/cache_dir` and ``$six_ALL_CCFRWORK/datasets` +#- Experiment dumps -> `$six_ALL_CCFRWORK/experiments` + +# specific caches + +export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models +export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets +export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules +export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics + +export DATASETS_CUSTOM=$six_ALL_CCFRWORK/datasets-custom + +### CONDA ### + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/gpfslocalsup/pub/anaconda-py3/2020.02/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" ]; then + . "/gpfslocalsup/pub/anaconda-py3/2020.02/etc/profile.d/conda.sh" + else + export PATH="/gpfslocalsup/pub/anaconda-py3/2020.02/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +export CONDA_ENVS_PATH=$six_ALL_CCFRWORK/conda +conda activate tr1-13B diff --git a/train/tr8-104B-wide/tb-rename-events.py b/train/tr8-104B-wide/tb-rename-events.py new file mode 100644 index 0000000000000000000000000000000000000000..9687ce5d5b78c7da295359bb1d3ba3600f2c286a --- /dev/null +++ b/train/tr8-104B-wide/tb-rename-events.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# this script renames event names in tensorboard log files +# it does the rename in place (so make back ups!) +# +# example: +# +# find . 
-name "*.tfevents*" -exec tb-rename-events.py {} "iteration-time" "iteration-time/iteration-time" \; +# +# more than one old tag can be remapped to one new tag - use `;` as a separator: +# +# tb-rename-events.py events.out.tfevents.1 "training loss;validation loss" "loss" +# +# this script is derived from https://stackoverflow.com/a/60080531/9201239 +# +# Important: this script requires CUDA environment. + +import shlex +import sys +from pathlib import Path +import os +# avoid using the GPU +os.environ['CUDA_VISIBLE_DEVICES'] = '' +# disable logging +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +import tensorflow as tf +from tensorflow.core.util.event_pb2 import Event + +def rename_events(input_file, old_tags, new_tag): + new_file = input_file + ".new" + # Make a record writer + with tf.io.TFRecordWriter(new_file) as writer: + # Iterate event records + for rec in tf.data.TFRecordDataset([input_file]): + # Read event + ev = Event() + ev.MergeFromString(rec.numpy()) + # Check if it is a summary + #print(ev) + if ev.summary: + # Iterate summary values + for v in ev.summary.value: + #print(v) + # Check if the tag should be renamed + if v.tag in old_tags: + # Rename with new tag name + v.tag = new_tag + writer.write(ev.SerializeToString()) + os.rename(new_file, input_file) + +def rename_events_dir(input_file, old_tags, new_tag): + # Write renamed events + rename_events(input_file, old_tags, new_tag) + +if __name__ == '__main__': + if len(sys.argv) != 4: + print(f'{sys.argv[0]} ', + file=sys.stderr) + sys.exit(1) + input_file, old_tags, new_tag = sys.argv[1:] + print(input_file, shlex.quote(old_tags), shlex.quote(new_tag)) + old_tags = old_tags.split(';') + rename_events_dir(input_file, old_tags, new_tag) diff --git a/train/tr8-104B-wide/tr8-104B-hub-sync-logs.slurm b/train/tr8-104B-wide/tr8-104B-hub-sync-logs.slurm new file mode 100644 index 0000000000000000000000000000000000000000..ba66c2c91556920eb631ecf7f2f91e6d16532d11 --- /dev/null +++ 
b/train/tr8-104B-wide/tr8-104B-hub-sync-logs.slurm @@ -0,0 +1,25 @@ +#!/bin/bash +#SBATCH --job-name=tr8-104B-hub-sync-logs # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=2:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=compil +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + +source $six_ALL_CCFRWORK/code/tr8-104B/bigscience/train/tr8-104B-wide/start-tr8-104B +module load git-lfs + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8-104B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr8-104B-logs + +BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr8-104B/bigscience + +$BIG_SCIENCE_REPO_PATH/tools/hub-sync.py --repo-path $REPO_PATH --patterns 'tensorboard/*tfevents*' 'codecarbon/*.csv' 'logs/*.txt' -d + +echo "END TIME: $(date)" diff --git a/train/tr8-104B-wide/tr8-104B-slurm-status.slurm b/train/tr8-104B-wide/tr8-104B-slurm-status.slurm new file mode 100644 index 0000000000000000000000000000000000000000..b5f14b90cba86eca8e55a7608ba8ac9432815ae9 --- /dev/null +++ b/train/tr8-104B-wide/tr8-104B-slurm-status.slurm @@ -0,0 +1,26 @@ +#!/bin/bash +#SBATCH --job-name=tr8-104B-slurm-status # job name +#SBATCH --ntasks=1 # number of MP tasks +#SBATCH --nodes=1 # number of nodes +#SBATCH --cpus-per-task=1 # number of cores per task +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --time=0:30:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --partition=compil +#SBATCH --account=six@cpu + +echo "START TIME: $(date)" + + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8-104B +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr8-104B-logs +LOGS_PATH=$REPO_PATH/logs + 
+MAIN_LOG_FILE=$LOGS_PATH/main_log.txt +BIG_SCIENCE_REPO_PATH=$six_ALL_CCFRWORK/code/tr8-104B/bigscience +WATCH_SLURM_NAME=tr8-104B + +$BIG_SCIENCE_REPO_PATH/tools/slurm-status.py --job-name $WATCH_SLURM_NAME 2>&1 | tee -a $MAIN_LOG_FILE + +echo "END TIME: $(date)" diff --git a/train/tr8-104B-wide/tr8b-104B-pile.slurm b/train/tr8-104B-wide/tr8b-104B-pile.slurm new file mode 100644 index 0000000000000000000000000000000000000000..c7742bbd850497cad6bdd3ec99999691aec731ad --- /dev/null +++ b/train/tr8-104B-wide/tr8b-104B-pile.slurm @@ -0,0 +1,171 @@ +#!/bin/bash +#SBATCH --job-name=tr8b-104B-pile +#SBATCH --constraint=v100-32g +#SBATCH --nodes=128 +#SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! +#SBATCH --cpus-per-task=40 # number of cores per tasks +#SBATCH --hint=nomultithread # we get physical cores not logical +#SBATCH --gres=gpu:4 # number of gpus +#SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS) +#SBATCH --output=%x-%j.out # output file name +#SBATCH --account=six@v100 + +set -x -e + +source $six_ALL_CCFRWORK/code/tr8-104B/bigscience/train/tr8-104B-wide/start-tr8-104B + +echo "START TIME: $(date)" + +DATA_OUTPUT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr8b-104B-pile +CHECKPOINT_PATH=$DATA_OUTPUT_PATH/checkpoints +REPO_PATH=$DATA_OUTPUT_PATH/tr8b-104B-pile-logs +TENSORBOARD_PATH=$REPO_PATH/tensorboard +CODECARBON_PATH=$REPO_PATH/codecarbon +LOGS_PATH=$REPO_PATH/logs + +MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr8-104B/Megatron-DeepSpeed-tr8-104B + +VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json +MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt +DATA_PATH=$six_ALL_CCFRWORK/datasets/pile/pile_text_document + +cd $MEGATRON_DEEPSPEED_REPO + +MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) +MASTER_PORT=6000 + +GPUS_PER_NODE=4 +NNODES=128 # switch to 128 +TP_SIZE=4 # always fixed to the size of a single node +PP_SIZE=8 # NLAYERS must be a multiple of PP_SIZE here 
+#DP_SIZE=$NNODES*$GPUS_PER_NODE/($PP_SIZE*$TP_SIZE) # will get derived automatically by trainer
+
+# GLOBAL_BATCH_SIZE has to be divisible by MICRO_BATCH_SIZE*DP_size
+# GLOBAL_BATCH_SIZE=$(($MICRO_BATCH_SIZE*$GAS*$DP_SIZE)) - GAS is auto-derived by deepspeed
+MICRO_BATCH_SIZE=1
+GLOBAL_BATCH_SIZE=2048
+
+NLAYERS=32
+NHIDDEN=16384
+NHEADS=32
+SEQ_LEN=2048
+VOCAB_SIZE=50257
+
+SAVE_INTERVAL=1500
+
+OPTIMIZER_ARGS=" \
+    --optimizer adam \
+    --adam-beta1 0.9 \
+    --adam-beta2 0.999 \
+    --adam-eps 1e-8 \
+    --lr 6e-5 \
+    --min-lr 6e-6 \
+    --lr-decay-style cosine \
+    --lr-decay-samples 126_953_125 \
+    --lr-warmup-samples 216_320 \
+    --clip-grad 1.0 \
+    --weight-decay 1e-1 \
+    "
+
+EXIT_OPTS=" \
+    --exit-duration-in-mins 1190 \
+    "
+
+GPT_ARGS=" \
+    --num-layers $NLAYERS \
+    --hidden-size $NHIDDEN \
+    --num-attention-heads $NHEADS \
+    --seq-length $SEQ_LEN \
+    --max-position-embeddings $SEQ_LEN \
+    --micro-batch-size $MICRO_BATCH_SIZE \
+    --rampup-batch-size 16 16 6_000_000 \
+    --global-batch-size $GLOBAL_BATCH_SIZE \
+    --train-samples 300_000_000 \
+    --vocab-file $VOCAB_FILE \
+    --merge-file $MERGE_FILE \
+    --loss-scale 12 \
+    --init-method-std 0.006 \
+    --fp16 \
+    --checkpoint-activations \
+    --seed 43 \
+    $OPTIMIZER_ARGS \
+    $EXIT_OPTS \
+    "
+
+OUTPUT_ARGS=" \
+    --log-interval 1 \
+    --save-interval $SAVE_INTERVAL \
+    --eval-interval 1000 \
+    --eval-iters 5 \
+    --codecarbon-dir $CODECARBON_PATH \
+    --tensorboard-dir $TENSORBOARD_PATH \
+    --tensorboard-queue-size 5 \
+    --log-timers-to-tensorboard \
+    --log-batch-size-to-tensorboard \
+    --log-validation-ppl-to-tensorboard \
+    "
+
+ZERO_STAGE=1
+
+config_json="./ds_config.$SLURM_JOBID.json"
+
+# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+cat <<EOT > $config_json
+{
+  "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+  "train_batch_size": $GLOBAL_BATCH_SIZE,
+  "gradient_clipping": 1.0,
+  "zero_optimization": {
+    "stage": $ZERO_STAGE
+  },
+  "fp16": {
+    "enabled": true,
+    "loss_scale": 0,
+ "loss_scale_window": 500, + "hysteresis": 2, + "min_loss_scale": 1, + "initial_scale_power": 12 + }, + "steps_per_print": 2000, + "wall_clock_breakdown": false +} +EOT + + +DEEPSPEED_ARGS=" \ + --deepspeed \ + --deepspeed_config ${config_json} \ + --zero-stage ${ZERO_STAGE} \ + --deepspeed-activation-checkpointing \ + " + +export LAUNCHER="python -u -m torch.distributed.launch \ + --nproc_per_node $GPUS_PER_NODE \ + --nnodes $NNODES \ + --master_addr $MASTER_ADDR \ + --master_port $MASTER_PORT \ + " + +export CMD=" \ + `pwd`/pretrain_gpt.py \ + --tensor-model-parallel-size $TP_SIZE \ + --pipeline-model-parallel-size $PP_SIZE \ + $GPT_ARGS \ + $OUTPUT_ARGS \ + --save $CHECKPOINT_PATH \ + --load $CHECKPOINT_PATH \ + --data-path $DATA_PATH \ + --data-impl mmap \ + --split 949,50,1 \ + --distributed-backend nccl \ + $DEEPSPEED_ARGS \ + " + +echo $CMD + +# to debug - add echo (it exits and prints what it would have launched) +clear; srun --jobid $SLURM_JOBID bash -c '$LAUNCHER --node_rank $SLURM_PROCID $CMD' 2>&1 | tee -a $LOGS_PATH/main_log.txt + +echo "END TIME: $(date)" + +#