applied-ai-018 committed (verified)

Commit 241bd09 · 1 parent: d1df0f5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +1 -0
  2. en_hn_south/final.json +3 -0
  3. sangraha/tok_sangraha/tok_jobs/tokenizer_0.yaml +122 -0
  4. sangraha/tok_sangraha/tok_jobs/tokenizer_1.yaml +122 -0
  5. sangraha/tok_sangraha/tok_jobs/tokenizer_10.yaml +122 -0
  6. sangraha/tok_sangraha/tok_jobs/tokenizer_11.yaml +122 -0
  7. sangraha/tok_sangraha/tok_jobs/tokenizer_12.yaml +122 -0
  8. sangraha/tok_sangraha/tok_jobs/tokenizer_13.yaml +122 -0
  9. sangraha/tok_sangraha/tok_jobs/tokenizer_14.yaml +122 -0
  10. sangraha/tok_sangraha/tok_jobs/tokenizer_15.yaml +122 -0
  11. sangraha/tok_sangraha/tok_jobs/tokenizer_16.yaml +122 -0
  12. sangraha/tok_sangraha/tok_jobs/tokenizer_17.yaml +122 -0
  13. sangraha/tok_sangraha/tok_jobs/tokenizer_18.yaml +122 -0
  14. sangraha/tok_sangraha/tok_jobs/tokenizer_19.yaml +122 -0
  15. sangraha/tok_sangraha/tok_jobs/tokenizer_2.yaml +122 -0
  16. sangraha/tok_sangraha/tok_jobs/tokenizer_20.yaml +122 -0
  17. sangraha/tok_sangraha/tok_jobs/tokenizer_21.yaml +122 -0
  18. sangraha/tok_sangraha/tok_jobs/tokenizer_22.yaml +122 -0
  19. sangraha/tok_sangraha/tok_jobs/tokenizer_23.yaml +122 -0
  20. sangraha/tok_sangraha/tok_jobs/tokenizer_24.yaml +122 -0
  21. sangraha/tok_sangraha/tok_jobs/tokenizer_25.yaml +122 -0
  22. sangraha/tok_sangraha/tok_jobs/tokenizer_26.yaml +122 -0
  23. sangraha/tok_sangraha/tok_jobs/tokenizer_27.yaml +122 -0
  24. sangraha/tok_sangraha/tok_jobs/tokenizer_28.yaml +122 -0
  25. sangraha/tok_sangraha/tok_jobs/tokenizer_29.yaml +122 -0
  26. sangraha/tok_sangraha/tok_jobs/tokenizer_3.yaml +122 -0
  27. sangraha/tok_sangraha/tok_jobs/tokenizer_30.yaml +122 -0
  28. sangraha/tok_sangraha/tok_jobs/tokenizer_31.yaml +122 -0
  29. sangraha/tok_sangraha/tok_jobs/tokenizer_32.yaml +122 -0
  30. sangraha/tok_sangraha/tok_jobs/tokenizer_33.yaml +122 -0
  31. sangraha/tok_sangraha/tok_jobs/tokenizer_34.yaml +122 -0
  32. sangraha/tok_sangraha/tok_jobs/tokenizer_35.yaml +122 -0
  33. sangraha/tok_sangraha/tok_jobs/tokenizer_36.yaml +122 -0
  34. sangraha/tok_sangraha/tok_jobs/tokenizer_37.yaml +122 -0
  35. sangraha/tok_sangraha/tok_jobs/tokenizer_38.yaml +122 -0
  36. sangraha/tok_sangraha/tok_jobs/tokenizer_39.yaml +122 -0
  37. sangraha/tok_sangraha/tok_jobs/tokenizer_4.yaml +122 -0
  38. sangraha/tok_sangraha/tok_jobs/tokenizer_40.yaml +122 -0
  39. sangraha/tok_sangraha/tok_jobs/tokenizer_41.yaml +122 -0
  40. sangraha/tok_sangraha/tok_jobs/tokenizer_42.yaml +122 -0
  41. sangraha/tok_sangraha/tok_jobs/tokenizer_43.yaml +122 -0
  42. sangraha/tok_sangraha/tok_jobs/tokenizer_44.yaml +122 -0
  43. sangraha/tok_sangraha/tok_jobs/tokenizer_45.yaml +122 -0
  44. sangraha/tok_sangraha/tok_jobs/tokenizer_5.yaml +122 -0
  45. sangraha/tok_sangraha/tok_jobs/tokenizer_6.yaml +122 -0
  46. sangraha/tok_sangraha/tok_jobs/tokenizer_7.yaml +122 -0
  47. sangraha/tok_sangraha/tok_jobs/tokenizer_8.yaml +122 -0
  48. sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml +122 -0
  49. sangraha/tok_sangraha/tok_jobs1/tokenizer_10.yaml +122 -0
  50. sangraha/tok_sangraha/tok_jobs1/tokenizer_11.yaml +122 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ en_hn_south/final.json filter=lfs diff=lfs merge=lfs -text
en_hn_south/final.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf8f3c13bf07efae57befd0f1fc2ff78455dace4759b764c3312f5aeda806bd1
+ size 67119608
sangraha/tok_sangraha/tok_jobs/tokenizer_0.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-0
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_0.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_1.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-1
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_1.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_10.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-10
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_10.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_11.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-11
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_11.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_12.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-12
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_12.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_13.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-13
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_13.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_14.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-14
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_14.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_15.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-15
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_15.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_16.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-16
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_16.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_17.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-17
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_17.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_18.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-18
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_18.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_19.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-19
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_19.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_2.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-2
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_2.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_20.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-20
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_20.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_21.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-21
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_21.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_22.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-22
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_22.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_23.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-23
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_23.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_24.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-24
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_24.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_25.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-25
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_25.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_26.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-26
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_26.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_27.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-27
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_27.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_28.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-28
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_28.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_29.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-29
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_29.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_3.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-3
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_3.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_30.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-30
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_30.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_31.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-31
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_31.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_32.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-32
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_32.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_33.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-33
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_33.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_34.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-34
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_34.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
sangraha/tok_sangraha/tok_jobs/tokenizer_35.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-35
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_35.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_36.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-36
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_36.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_37.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-37
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_37.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_38.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-38
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_38.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_39.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-39
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_39.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_4.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-4
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_4.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_40.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-40
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_40.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_41.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-41
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_41.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_42.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-42
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_42.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_43.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-43
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_43.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_44.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-44
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_44.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_45.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-45
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_45.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_5.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-5
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_5.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_6.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-6
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_6.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_7.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-sangraha-7
5
+ spec:
6
+ slotsPerWorker: 8
7
+ runPolicy:
8
+ backoffLimit: 0
9
+ cleanPodPolicy: Running
10
+ mpiReplicaSpecs:
11
+ Launcher:
12
+ replicas: 1
13
+ template:
14
+ spec:
15
+ hostIPC: true
16
+ nodeSelector:
17
+ role: worker
18
+ volumes:
19
+ - name: pvc-wekafs-peacock
20
+ persistentVolumeClaim:
21
+ claimName: pvc-wekafs-peacock
22
+ containers:
23
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
24
+ name: llama2-3b-pretrain-launcher
25
+ imagePullPolicy: IfNotPresent
26
+ volumeMounts:
27
+ - name: pvc-wekafs-peacock
28
+ mountPath: /mnt/weka/peacock
29
+ command: ["/bin/bash", "-c"]
30
+ args:
31
+ - >-
32
+ sleep 10;
33
+ export SYNAPSE_VERSION="1.17.0";
34
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
35
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
36
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
37
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
38
+
39
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_7.sh
40
+
41
+ export HL_LLAMA_VER=2;
42
+ export HL_LLAMA_MODEL_SIZE=3;
43
+
44
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
45
+ export HL_DATA_FILE_PREFIX="all";
46
+ export HL_DATASET_NAME=slimpajama-subset;
47
+ export HL_DP=16;
48
+ export HL_PP=2;
49
+ export HL_TP=4;
50
+ export HL_TRAIN_ITERS=240000;
51
+ export HL_SAVE_INTERVAL=1000;
52
+
53
+ #export HL_EXIT_INTERVAL=100;
54
+ #export HL_SAVE=0;
55
+
56
+ export HL_MICRO_BATCH=8;
57
+
58
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
59
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
60
+
61
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
62
+ export DEBUG=True;
63
+
64
+ set -exo pipefail;
65
+
66
+
67
+ $WORKER_DIR/launch/setup.sh;
68
+
69
+ mpirun --npernode 1 \
70
+ --tag-output \
71
+ --allow-run-as-root \
72
+ --prefix $MPI_ROOT \
73
+ -x MODEL_REQ=$MODEL_REQ \
74
+ -x WORKER_DIR=$WORKER_DIR \
75
+ -x MODEL_PATH=$MODEL_PATH \
76
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
77
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
78
+ $WORKER_DIR/launch/setup.sh;
79
+
80
+ cd $MODEL_PATH;
81
+
82
+ mpirun -npernode 8 \
83
+ --tag-output \
84
+ --allow-run-as-root \
85
+ --prefix $MPI_ROOT \
86
+ -x WORKER_DIR=$WORKER_DIR \
87
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
88
+ Worker:
89
+ replicas: 1
90
+ template:
91
+ spec:
92
+ runtimeClassName: habana
93
+ nodeSelector:
94
+ role: worker
95
+ volumes:
96
+ - name: pvc-wekafs-peacock
97
+ persistentVolumeClaim:
98
+ claimName: pvc-wekafs-peacock
99
+ hostIPC: true
100
+ containers:
101
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
102
+ name: llama2-3b-pretrain-worker
103
+ imagePullPolicy: IfNotPresent
104
+ securityContext:
105
+ capabilities:
106
+ add:
107
+ - SYSLOG
108
+ resources:
109
+ limits:
110
+ habana.ai/gaudi: 8
111
+ hugepages-2Mi: 300Gi
112
+ memory: 600Gi
113
+ cpu: 120
114
+ requests:
115
+ habana.ai/gaudi: 8
116
+ hugepages-2Mi: 300Gi
117
+ memory: 600Gi
118
+ cpu: 120
119
+ volumeMounts:
120
+ - name: pvc-wekafs-peacock
121
+ mountPath: /mnt/weka/peacock
122
+
sangraha/tok_sangraha/tok_jobs/tokenizer_8.yaml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-8
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_8.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
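The manifests in tok_jobs differ only in the metadata name and the tokenizer_N.sh path they export, so in practice they can be stamped out from a single template. A minimal sketch follows, assuming a hypothetical tokenizer_template.yaml that uses NUM as a placeholder; neither the template file nor the shard range is part of this commit.

# hedged sketch: generate numbered MPIJob manifests from one hypothetical template
FIRST=0; LAST=45   # assumption: set to the actual shard range
for i in $(seq "$FIRST" "$LAST"); do
  sed "s/NUM/${i}/g" tokenizer_template.yaml > "tok_jobs/tokenizer_${i}.yaml"
done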
sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-9
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_9.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
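Once a manifest is in place, submission and monitoring follow the usual MPI operator flow. A hedged sketch is below; it assumes the MPIJob CRD from the v2 MPI operator is installed in the cluster and that the launcher pod name contains the job name, which is the operator's default behaviour.

# hedged sketch: submit one tokenizer job and follow its launcher output
kubectl apply -f sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml
kubectl get mpijob peacock-tokenizer-sangraha-9
kubectl get pods | grep peacock-tokenizer-sangraha-9
kubectl logs -f "$(kubectl get pods -o name | grep peacock-tokenizer-sangraha-9 | grep launcher | head -n 1)"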
sangraha/tok_sangraha/tok_jobs1/tokenizer_10.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-10
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_10.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
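The launcher script uses two mpirun invocations with different process layouts: --npernode 1 runs setup.sh once per node, while -npernode 8 starts eight ranks per node (one per Gaudi card) for the tokenizer command. A hedged illustration of the two layouts, with hostname standing in for the real scripts:

# hedged illustration of the two per-node layouts used above
mpirun --allow-run-as-root --npernode 1 hostname   # one rank per node: node-level setup
mpirun --allow-run-as-root --npernode 8 hostname   # eight ranks per node: one per accelerator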
sangraha/tok_sangraha/tok_jobs1/tokenizer_11.yaml ADDED
@@ -0,0 +1,122 @@
+ apiVersion: kubeflow.org/v2beta1
+ kind: MPIJob
+ metadata:
+ name: peacock-tokenizer-sangraha-11
+ spec:
+ slotsPerWorker: 8
+ runPolicy:
+ backoffLimit: 0
+ cleanPodPolicy: Running
+ mpiReplicaSpecs:
+ Launcher:
+ replicas: 1
+ template:
+ spec:
+ hostIPC: true
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-launcher
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+ command: ["/bin/bash", "-c"]
+ args:
+ - >-
+ sleep 10;
+ export SYNAPSE_VERSION="1.17.0";
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+ export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+ export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+ export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+ export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_11.sh
+
+ export HL_LLAMA_VER=2;
+ export HL_LLAMA_MODEL_SIZE=3;
+
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+ export HL_DATA_FILE_PREFIX="all";
+ export HL_DATASET_NAME=slimpajama-subset;
+ export HL_DP=16;
+ export HL_PP=2;
+ export HL_TP=4;
+ export HL_TRAIN_ITERS=240000;
+ export HL_SAVE_INTERVAL=1000;
+
+ #export HL_EXIT_INTERVAL=100;
+ #export HL_SAVE=0;
+
+ export HL_MICRO_BATCH=8;
+
+ export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+ export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+ export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+ export DEBUG=True;
+
+ set -exo pipefail;
+
+
+ $WORKER_DIR/launch/setup.sh;
+
+ mpirun --npernode 1 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x MODEL_REQ=$MODEL_REQ \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x MODEL_PATH=$MODEL_PATH \
+ -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+ $WORKER_DIR/launch/setup.sh;
+
+ cd $MODEL_PATH;
+
+ mpirun -npernode 8 \
+ --tag-output \
+ --allow-run-as-root \
+ --prefix $MPI_ROOT \
+ -x WORKER_DIR=$WORKER_DIR \
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+ Worker:
+ replicas: 1
+ template:
+ spec:
+ runtimeClassName: habana
+ nodeSelector:
+ role: worker
+ volumes:
+ - name: pvc-wekafs-peacock
+ persistentVolumeClaim:
+ claimName: pvc-wekafs-peacock
+ hostIPC: true
+ containers:
+ - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+ name: llama2-3b-pretrain-worker
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ capabilities:
+ add:
+ - SYSLOG
+ resources:
+ limits:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ requests:
+ habana.ai/gaudi: 8
+ hugepages-2Mi: 300Gi
+ memory: 600Gi
+ cpu: 120
+ volumeMounts:
+ - name: pvc-wekafs-peacock
+ mountPath: /mnt/weka/peacock
+
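Finally, the worker requests hugepages-2Mi: 300Gi alongside 600Gi of memory, which only schedules on nodes pre-provisioned with at least 300Gi / 2Mi = 153600 two-megabyte hugepages. A hedged pre-flight check (<worker-node> is a placeholder, not a name from this commit):

# hedged sketch: confirm a candidate node exposes enough 2Mi hugepages
kubectl describe node <worker-node> | grep -i hugepages-2Mi
# or directly on the node itself; expect HugePages_Total >= 153600
grep HugePages_Total /proc/meminfo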