diff --git a/.gitattributes b/.gitattributes
index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..2a9da2ca05a4b12a98103c10da43ea15bc58bfe0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+en_hn_south/final.json filter=lfs diff=lfs merge=lfs -text
diff --git a/en_hn_south/final.json b/en_hn_south/final.json
new file mode 100644
index 0000000000000000000000000000000000000000..dabeef153b453caf89f67a65cfa45aba5734f97e
--- /dev/null
+++ b/en_hn_south/final.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf8f3c13bf07efae57befd0f1fc2ff78455dace4759b764c3312f5aeda806bd1
+size 67119608
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_0.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0f02bfe05b3744bb9d5e234d6c4f309f8228fe4
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_0.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-0
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_0.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_1.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7aaff37bafe530b4a31c31893b136bba2a2922d8
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_1.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-1
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_1.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_10.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_10.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..636c09497b8c1685814f58a8dbfe767919fe5cd6
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_10.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-10
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_10.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_11.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_11.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a7d7c184f09bb10eb692ac2095c402ec54adffa4
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_11.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-11
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_11.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_12.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_12.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5331c70898eafbb67e5bf66a73dd7f58d96af3a2
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_12.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-12
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_12.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_13.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_13.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce405645affac2e5575a11795b0aa06bda3f3fa1
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_13.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-13
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_13.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_14.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_14.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..509507cf69271e620e3a1504f8ba2a02b52dc06c
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_14.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-14
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_14.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_15.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_15.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c6c52838b41d6b9b54d45b7494dfe895a340d2a
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_15.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-15
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_15.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_16.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_16.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a97aaac576dfae54ac872ea0f7a57c111bff2115
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_16.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-16
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_16.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_17.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_17.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1030e971b537feb1917b5293e263ee510ca3fa00
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_17.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-17
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_17.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_18.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_18.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e16a1fa7994a8bd46117ac977aa6b57194bcdcbf
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_18.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-18
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_18.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_19.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_19.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c649b0f433ce87a4f3a10c3cf3a11f1a1a701857
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_19.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-19
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_19.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_2.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4149c414529816bd912281ca17ef422fa02d650e
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_2.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-2
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_2.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_20.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_20.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..17b0f37a34e6c701825144ab3fb1202c62e7e17a
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_20.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-20
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_20.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_21.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_21.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68b52f07afea14af5221a3f7f50ec44ea4bd9431
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_21.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-21
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_21.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_22.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_22.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf58f72639fd81ef55f93dbeaa33e7615640e480
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_22.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-22
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_22.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_23.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_23.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..76919084b2760651650b8d84315ce149c3f1b316
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_23.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-23
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_23.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
+                  #export HL_EXIT_INTERVAL=100;
+                  #export HL_SAVE=0;
+
+                  export HL_MICRO_BATCH=8;
+
+                  export HL_TOKENIZER_TYPE=SentencePieceTokenizer;
+                  export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k";
+
+                  export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}";
+                  export DEBUG=True;
+
+                  set -exo pipefail;
+
+
+                  $WORKER_DIR/launch/setup.sh;
+
+                  mpirun --npernode 1 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x MODEL_REQ=$MODEL_REQ \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x MODEL_PATH=$MODEL_PATH \
+                    -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION \
+                    $WORKER_DIR/launch/setup.sh;
+
+                  cd $MODEL_PATH;
+
+                  mpirun -npernode 8 \
+                    --tag-output \
+                    --allow-run-as-root \
+                    --prefix $MPI_ROOT \
+                    -x WORKER_DIR=$WORKER_DIR \
+                    -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          runtimeClassName: habana
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          hostIPC: true
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-worker
+              imagePullPolicy: IfNotPresent
+              securityContext:
+                capabilities:
+                  add:
+                    - SYSLOG
+              resources:
+                limits:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+                requests:
+                  habana.ai/gaudi: 8
+                  hugepages-2Mi: 300Gi
+                  memory: 600Gi
+                  cpu: 120
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+
diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_24.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_24.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d41e9cdd0ad81c5de572ce973150903acc819713
--- /dev/null
+++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_24.yaml
@@ -0,0 +1,122 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-sangraha-24
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 0
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          nodeSelector:
+            role: worker
+          volumes:
+            - name: pvc-wekafs-peacock
+              persistentVolumeClaim:
+                claimName: pvc-wekafs-peacock
+          containers:
+            - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest
+              name: llama2-3b-pretrain-launcher
+              imagePullPolicy: IfNotPresent
+              volumeMounts:
+                - name: pvc-wekafs-peacock
+                  mountPath: /mnt/weka/peacock
+              command: ["/bin/bash", "-c"]
+              args:
+                - >-
+                  sleep 10;
+                  export SYNAPSE_VERSION="1.17.0";
+                  export WORKER_DIR="/mnt/weka/peacock/training/llama3b";
+                  export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed";
+                  export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH;
+                  export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt";
+
+                  export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_24.sh
+
+                  export HL_LLAMA_VER=2;
+                  export HL_LLAMA_MODEL_SIZE=3;
+
+                  export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all";
+                  export HL_DATA_FILE_PREFIX="all";
+                  export HL_DATASET_NAME=slimpajama-subset;
+                  export HL_DP=16;
+                  export HL_PP=2;
+                  export HL_TP=4;
+                  export HL_TRAIN_ITERS=240000;
+                  export HL_SAVE_INTERVAL=1000;
+
#export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_25.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_25.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55359758d680c30f86558e5ff1e9e338383c119c --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_25.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-25 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_25.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export 
HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_26.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_26.yaml new file mode 100644 index 0000000000000000000000000000000000000000..557ff8280eabcd1d1d74b190bf4a2ae95cb9ab6d --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_26.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-26 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_26.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export 
RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_27.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_27.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d882c955316cb68579030e7086e3d60c72114715 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_27.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-27 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_27.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + 
$WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_28.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_28.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8391d2d9187dcdeb99cdf6f7fbe96af9db23617 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_28.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-28 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_28.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x 
WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_29.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_29.yaml new file mode 100644 index 0000000000000000000000000000000000000000..472fc4dca51559786e659f829375e91863933ac8 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_29.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-29 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_29.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + 
$WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_3.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e2233cac20b878cc66d9ea0cd6e93481c60dfeab --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_3.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-3 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_3.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + 
-x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_30.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_30.yaml new file mode 100644 index 0000000000000000000000000000000000000000..051dc7963ad247e0d93c62fd8d06c66965a48895 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_30.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-30 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_30.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: 
+ - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_31.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_31.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cddedd2c2f58e2ff467b8b067a0110ab4d4028a6 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_31.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-31 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_31.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: 
vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_32.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e0c42bfd2f620093f14a1507661e7817093535ed --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_32.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-32 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_32.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + 
securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_33.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_33.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66212dfd602d22f72944f61675939ebf4b354480 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_33.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-33 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_33.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + 
habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_34.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_34.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0aaa4e4320d279f006e0a16ad1b95a0f1f506ccd --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_34.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-34 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_34.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git 
a/sangraha/tok_sangraha/tok_jobs/tokenizer_35.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b992acb28568c8a4ad9100774c0fd86944410ed --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_35.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-35 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_35.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_36.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_36.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..5c89f2464aa44be011b232f84be6e39774d1a2c2 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_36.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-36 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_36.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_37.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_37.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4099c36367c608a87d06fc3a8911ff9c2a3d63af --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_37.yaml @@ -0,0 
+1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-37 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_37.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_38.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_38.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3b61fc532f697636fad74adfad7a42973f4768b --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_38.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-38 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 
+ cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_38.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_39.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_39.yaml new file mode 100644 index 0000000000000000000000000000000000000000..faadfb00f3f113a2f426a1b90e5e3e3e26609a48 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_39.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-39 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: 
pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_39.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_4.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96fc8e973b3cbcff438df6992cc82a9a1463fb42 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_4.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-4 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: 
vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_4.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_40.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_40.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01070341dad4a47f14663c1652c4a004c5aeb836 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_40.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-40 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + 
volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_40.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_41.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_41.yaml new file mode 100644 index 0000000000000000000000000000000000000000..952960e0f1d56aaa51a78a1a3a5965962378a5fc --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_41.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-41 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export 
SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_41.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_42.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_42.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc23fed34d89aaf63897cc0d7c080d8324c6a4f5 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_42.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-42 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export 
MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_42.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_43.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_43.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e2985b777387c27674b620d38be29ec13851f13 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_43.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-43 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export 
TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_43.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_44.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_44.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7d84b64813612b2f817be0b4ce54aca5e0b15eb --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_44.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-44 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_44.sh + + export HL_LLAMA_VER=2; + export 
HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_45.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_45.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cca814bc4f43f7b93370c69f80ab234ca923086 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_45.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-45 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_45.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export 
HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_5.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee421c9512796abd4e3a778d53ad98fac4998fc9 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_5.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-5 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_5.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export 
HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_6.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_6.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2075365b7fc7662698d0a1c4d767606b2febf12 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_6.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-6 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_6.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export 
HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_7.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_7.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd8e589157b1cae6991d53a8395e041b98fdbe1f --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_7.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-7 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_7.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export 
RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_8.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..104b9fdc3961e3d7fc76c37bd6dd826a2ae71d5f --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_8.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-8 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_8.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + 
$WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml b/sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a365dfc62693278ffa9453663760e92a6dbd7a8c --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs/tokenizer_9.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-9 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_9.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x 
WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_10.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_10.yaml new file mode 100644 index 0000000000000000000000000000000000000000..636c09497b8c1685814f58a8dbfe767919fe5cd6 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_10.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-10 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_10.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + 
$WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_11.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_11.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7d7c184f09bb10eb692ac2095c402ec54adffa4 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_11.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-11 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_11.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x 
WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_12.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_12.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5331c70898eafbb67e5bf66a73dd7f58d96af3a2 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_12.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-12 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_12.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + 
nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_13.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_13.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ce405645affac2e5575a11795b0aa06bda3f3fa1 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_13.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-13 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_13.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - 
image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_14.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_14.yaml new file mode 100644 index 0000000000000000000000000000000000000000..509507cf69271e620e3a1504f8ba2a02b52dc06c --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_14.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-14 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_14.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: 
IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_15.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_15.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c6c52838b41d6b9b54d45b7494dfe895a340d2a --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_15.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-15 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_15.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + 
requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_16.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_16.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a97aaac576dfae54ac872ea0f7a57c111bff2115 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_16.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-16 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_16.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + 
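These manifests differ only in the shard index that appears in metadata.name, in the TOKENIZER_CMD path (tok_files/tokenizer_N.sh), and in the file name itself, so one plausible way to produce the whole set is to stamp them out from a single template rather than maintain dozens of hand-edited copies. A minimal sketch follows, assuming a hypothetical tokenizer_template.yaml that carries the literal placeholder {{INDEX}}; the template name, the placeholder, and the index range are assumptions for illustration, not files or values introduced by this change.

#!/usr/bin/env bash
# Sketch of a per-shard manifest generator. tokenizer_template.yaml and the
# {{INDEX}} placeholder are assumed names, not part of this diff.
set -euo pipefail

template="tokenizer_template.yaml"
outdir="tok_jobs"
mkdir -p "$outdir"

# Index range is an assumption; the committed files visibly cover 0-45.
for idx in $(seq 0 45); do
  # Substitute every {{INDEX}} occurrence: the MPIJob name suffix,
  # the tokenizer_${idx}.sh path in TOKENIZER_CMD, and the output file name.
  sed "s/{{INDEX}}/${idx}/g" "$template" > "${outdir}/tokenizer_${idx}.yaml"
done

The generated manifests could then be submitted in one pass with kubectl apply -f tok_jobs/ (assuming the MPIJob CRD and operator are already installed in the cluster).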
diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_17.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_17.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1030e971b537feb1917b5293e263ee510ca3fa00 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_17.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-17 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_17.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_18.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_18.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..e16a1fa7994a8bd46117ac977aa6b57194bcdcbf --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_18.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-18 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_18.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs1/tokenizer_19.yaml b/sangraha/tok_sangraha/tok_jobs1/tokenizer_19.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c649b0f433ce87a4f3a10c3cf3a11f1a1a701857 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs1/tokenizer_19.yaml @@ -0,0 
+1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-19 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_19.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs3/tokenizer_30.yaml b/sangraha/tok_sangraha/tok_jobs3/tokenizer_30.yaml new file mode 100644 index 0000000000000000000000000000000000000000..051dc7963ad247e0d93c62fd8d06c66965a48895 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs3/tokenizer_30.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-30 +spec: + slotsPerWorker: 8 + runPolicy: + 
backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_30.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs3/tokenizer_35.yaml b/sangraha/tok_sangraha/tok_jobs3/tokenizer_35.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b992acb28568c8a4ad9100774c0fd86944410ed --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs3/tokenizer_35.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-35 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + 
volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_35.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs3/tokenizer_37.yaml b/sangraha/tok_sangraha/tok_jobs3/tokenizer_37.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4099c36367c608a87d06fc3a8911ff9c2a3d63af --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs3/tokenizer_37.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-37 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: 
vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_37.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_0.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0f02bfe05b3744bb9d5e234d6c4f309f8228fe4 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_0.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-0 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + 
volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_0.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_1.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7aaff37bafe530b4a31c31893b136bba2a2922d8 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_1.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-1 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; 
+ export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_1.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_2.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4149c414529816bd912281ca17ef422fa02d650e --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_2.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-2 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + 
export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_2.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_3.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e2233cac20b878cc66d9ea0cd6e93481c60dfeab --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_3.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-3 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export 
TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_3.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_4.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96fc8e973b3cbcff438df6992cc82a9a1463fb42 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_4.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-4 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_4.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + 
+ export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_40.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_40.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01070341dad4a47f14663c1652c4a004c5aeb836 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_40.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-40 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_40.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export 
HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_41.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_41.yaml new file mode 100644 index 0000000000000000000000000000000000000000..952960e0f1d56aaa51a78a1a3a5965962378a5fc --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_41.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-41 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_41.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + 
#export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_42.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_42.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc23fed34d89aaf63897cc0d7c080d8324c6a4f5 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_42.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-42 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_42.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export 
HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_43.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_43.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e2985b777387c27674b620d38be29ec13851f13 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_43.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-43 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_43.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export 
RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_44.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_44.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7d84b64813612b2f817be0b4ce54aca5e0b15eb --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_44.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-44 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_44.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + 
+ $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_45.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_45.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cca814bc4f43f7b93370c69f80ab234ca923086 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_45.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-45 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_45.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x 
WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_5.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee421c9512796abd4e3a778d53ad98fac4998fc9 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_5.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-5 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_5.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + 
$WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_6.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_6.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2075365b7fc7662698d0a1c4d767606b2febf12 --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_6.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-6 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_6.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR 
\ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_7.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_7.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd8e589157b1cae6991d53a8395e041b98fdbe1f --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_7.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-7 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_7.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + 
volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_8.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..104b9fdc3961e3d7fc76c37bd6dd826a2ae71d5f --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_8.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-8 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_8.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: 
vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + diff --git a/sangraha/tok_sangraha/tok_jobs4/tokenizer_9.yaml b/sangraha/tok_sangraha/tok_jobs4/tokenizer_9.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a365dfc62693278ffa9453663760e92a6dbd7a8c --- /dev/null +++ b/sangraha/tok_sangraha/tok_jobs4/tokenizer_9.yaml @@ -0,0 +1,122 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-sangraha-9 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 0 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + sleep 10; + export SYNAPSE_VERSION="1.17.0"; + export WORKER_DIR="/mnt/weka/peacock/training/llama3b"; + export MODEL_PATH="${WORKER_DIR}/Megatron-DeepSpeed"; + export MEGATRON_DEEPSPEED_ROOT=$MODEL_PATH; + export MODEL_REQ="${MODEL_PATH}/megatron/core/requirements.txt"; + + export TOKENIZER_CMD=/mnt/weka/peacock/peacock-data/idc/datasets/sangraha//tok_sangraha/tok_files/tokenizer_9.sh + + export HL_LLAMA_VER=2; + export HL_LLAMA_MODEL_SIZE=3; + + export HL_DATA_DIR_ROOT="/mnt/weka/peacock/peacock-data/idc/hineng/hn_eng_bn/merged_all"; + export HL_DATA_FILE_PREFIX="all"; + export HL_DATASET_NAME=slimpajama-subset; + export HL_DP=16; + export HL_PP=2; + export HL_TP=4; + export HL_TRAIN_ITERS=240000; + export HL_SAVE_INTERVAL=1000; + + #export HL_EXIT_INTERVAL=100; + #export HL_SAVE=0; + + export HL_MICRO_BATCH=8; + + export HL_TOKENIZER_TYPE=SentencePieceTokenizer; + export HL_TOKENIZER_DIR="/mnt/weka/peacock/peacock-data/tokenization/trained-tokenizer/enhiben_50k"; + + export RESULT_DESCRIPTION="${HL_DATASET_NAME}-TI${HL_TRAIN_ITERS}-TP${HL_TP}PP${HL_PP}DP${HL_DP}-MBS${HL_MICRO_BATCH}"; + export DEBUG=True; + + set -exo pipefail; + + + $WORKER_DIR/launch/setup.sh; + + mpirun --npernode 1 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x MODEL_REQ=$MODEL_REQ \ + -x WORKER_DIR=$WORKER_DIR \ + -x MODEL_PATH=$MODEL_PATH \ + -x MODEL_REFERENCES_ROOT=$MODEL_REFERENCES_ROOT \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION \ + $WORKER_DIR/launch/setup.sh; + + cd $MODEL_PATH; + + mpirun -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + Worker: + replicas: 1 + template: + spec: + runtimeClassName: habana + nodeSelector: + role: worker + volumes: + - name: pvc-wekafs-peacock + persistentVolumeClaim: + claimName: pvc-wekafs-peacock + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + name: llama2-3b-pretrain-worker + imagePullPolicy: IfNotPresent + 
securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 600Gi + cpu: 120 + volumeMounts: + - name: pvc-wekafs-peacock + mountPath: /mnt/weka/peacock +
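The manifests in this series are identical except for the shard index N, which appears in metadata.name (peacock-tokenizer-sangraha-N) and in the TOKENIZER_CMD path (tok_files/tokenizer_N.sh); everything else (image, parallelism settings, resource limits, volume mounts) is shared. A minimal generator sketch follows, assuming a hypothetical template file tok_job_template.yaml derived from one of the manifests above with each literal shard index replaced by the marker {{IDX}}; the script name, template name, and shard count are assumptions for illustration, not part of this commit.

#!/usr/bin/env python3
# Hypothetical helper: regenerate the near-identical MPIJob manifests above
# from a single template, varying only the shard index.
from pathlib import Path

# Assumed template: one manifest with every literal shard index replaced by
# the marker {{IDX}}. A plain string marker is used instead of a shell-style
# $IDX placeholder because the manifests themselves are full of shell
# variables ($WORKER_DIR, $MPI_ROOT, ...) that must pass through untouched.
TEMPLATE = Path("tok_job_template.yaml").read_text()

OUT_DIR = Path("sangraha/tok_sangraha/tok_jobs4")
OUT_DIR.mkdir(parents=True, exist_ok=True)

# Assumed shard count: this diff shows indices up to 45.
for idx in range(46):
    manifest = TEMPLATE.replace("{{IDX}}", str(idx))
    (OUT_DIR / f"tokenizer_{idx}.yaml").write_text(manifest)

Each generated manifest can then be submitted individually, e.g. kubectl apply -f sangraha/tok_sangraha/tok_jobs4/tokenizer_9.yaml, assuming a controller serving the kubeflow.org/v2beta1 MPIJob API (the Kubeflow MPI Operator) is installed on the cluster.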