diff --git a/dravid/tok_dravid/tok_files/tokenizer_103.sh b/dravid/tok_dravid/tok_files/tokenizer_103.sh
new file mode 100644
index 0000000000000000000000000000000000000000..87774f9c049ce59dcca2bcada71d529b7a95d01c
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_103.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer103/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitab \
+ --output-prefix $FINAL_DIR/tokenizer103/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_104.sh b/dravid/tok_dravid/tok_files/tokenizer_104.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7863f3249a2b1ea12cf67ab3e9fcf7fc46a3f909
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_104.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer104/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer104/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_106.sh b/dravid/tok_dravid/tok_files/tokenizer_106.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b1e7b199d048e6da6c8c5a1f4f24ef4ecd706048
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_106.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer106/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer106/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_107.sh b/dravid/tok_dravid/tok_files/tokenizer_107.sh
new file mode 100644
index 0000000000000000000000000000000000000000..579d045500a032e8be1c5a2688557fa83e4df6a0
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_107.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer107/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitab \
+ --output-prefix $FINAL_DIR/tokenizer107/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_109.sh b/dravid/tok_dravid/tok_files/tokenizer_109.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6fcda9efef577f97a6f9bf00012bab5b85bbddf9
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_109.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer109/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitab \
+ --output-prefix $FINAL_DIR/tokenizer109/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_11.sh b/dravid/tok_dravid/tok_files/tokenizer_11.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3b15d1493059654a2220f413ff860a55bdd925dd
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_11.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitab
+echo "above deepspeed$"
"above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer11/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitab \ + --output-prefix $FINAL_DIR/tokenizer11/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_112.sh b/dravid/tok_dravid/tok_files/tokenizer_112.sh new file mode 100644 index 0000000000000000000000000000000000000000..d3b90d03b5e030f2c03e38ac5be0b1bdfa527c88 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_112.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer112/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitaa \ + --output-prefix $FINAL_DIR/tokenizer112/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_113.sh b/dravid/tok_dravid/tok_files/tokenizer_113.sh new file mode 100644 index 0000000000000000000000000000000000000000..59a95323987410fbe4994e728c0333297744aee7 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_113.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer113/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitab \ + --output-prefix $FINAL_DIR/tokenizer113/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ 
diff --git a/dravid/tok_dravid/tok_files/tokenizer_115.sh b/dravid/tok_dravid/tok_files/tokenizer_115.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7862787e24d7a8d9999fe29c54e49213ef5957b0
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_115.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer115/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitab \
+ --output-prefix $FINAL_DIR/tokenizer115/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_116.sh b/dravid/tok_dravid/tok_files/tokenizer_116.sh
new file mode 100644
index 0000000000000000000000000000000000000000..cc388af60c3de66c6c7d44f32c84d613b21e2838
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_116.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer116/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer116/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_119.sh b/dravid/tok_dravid/tok_files/tokenizer_119.sh
new file mode 100644
index 0000000000000000000000000000000000000000..75207d09c45b6258bbaed38de60c5c0c3cea7b4e
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_119.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer119/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitab \
+ --output-prefix $FINAL_DIR/tokenizer119/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_120.sh b/dravid/tok_dravid/tok_files/tokenizer_120.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f531aecd2e395934d60b595e256da3e21fd17923
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_120.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer120/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer120/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_125.sh b/dravid/tok_dravid/tok_files/tokenizer_125.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d49356bd978cab4b9ceec0a4bc1ee25d41a6b8cb
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_125.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer125/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitab \
+ --output-prefix $FINAL_DIR/tokenizer125/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_126.sh b/dravid/tok_dravid/tok_files/tokenizer_126.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bc6c3946a66be946af4f53d839225da80f1e8a56
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_126.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcl
+echo "above deepspeed$"
"above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer126/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcl \ + --output-prefix $FINAL_DIR/tokenizer126/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_13.sh b/dravid/tok_dravid/tok_files/tokenizer_13.sh new file mode 100644 index 0000000000000000000000000000000000000000..2891b0a66e74e0064822d0f1fee1595fc76db0a4 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_13.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer13/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitab \ + --output-prefix $FINAL_DIR/tokenizer13/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_15.sh b/dravid/tok_dravid/tok_files/tokenizer_15.sh new file mode 100644 index 0000000000000000000000000000000000000000..300bdd23a86ceb93ca801b5790726a20be03f3b2 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_15.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer15/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitab \ + --output-prefix $FINAL_DIR/tokenizer15/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git 
diff --git a/dravid/tok_dravid/tok_files/tokenizer_16.sh b/dravid/tok_dravid/tok_files/tokenizer_16.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c3c4c73a06ad48a14764cffe637cc701eadb2d5c
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_16.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer16/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer16/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_17.sh b/dravid/tok_dravid/tok_files/tokenizer_17.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6b7c690f7d87f62703a7876754ede3c511099d18
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_17.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer17/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitab \
+ --output-prefix $FINAL_DIR/tokenizer17/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_20.sh b/dravid/tok_dravid/tok_files/tokenizer_20.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9368a9571efc528417ec3a3bd04464af52b56a09
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_20.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer20/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer20/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_21.sh b/dravid/tok_dravid/tok_files/tokenizer_21.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c8c6f9f4098495be951bd20f4c4d49d06e6003df
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_21.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer21/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitab \
+ --output-prefix $FINAL_DIR/tokenizer21/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_23.sh b/dravid/tok_dravid/tok_files/tokenizer_23.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2b8cfa9ea0ef30ce66730035d78e06a4807b4c29
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_23.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer23/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitab \
+ --output-prefix $FINAL_DIR/tokenizer23/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_25.sh b/dravid/tok_dravid/tok_files/tokenizer_25.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b65e93d6e477af57b54bb71ca62441da054a33b8
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_25.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer25/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitab \
+ --output-prefix $FINAL_DIR/tokenizer25/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_26.sh b/dravid/tok_dravid/tok_files/tokenizer_26.sh
new file mode 100644
index 0000000000000000000000000000000000000000..368e0657ac2ff2511ffbb817ac20204613a025e7
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_26.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer26/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer26/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_29.sh b/dravid/tok_dravid/tok_files/tokenizer_29.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d8ec9fb57bbb0664f875c4bcfb2d0ad1f91f7745
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_29.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer29/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitab \
+ --output-prefix $FINAL_DIR/tokenizer29/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_31.sh b/dravid/tok_dravid/tok_files/tokenizer_31.sh
new file mode 100644
index 0000000000000000000000000000000000000000..243b1cff01c444fc6e36a9d80929f2c2d20942e4
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_31.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer31/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitab \
+ --output-prefix $FINAL_DIR/tokenizer31/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_32.sh b/dravid/tok_dravid/tok_files/tokenizer_32.sh
new file mode 100644
index 0000000000000000000000000000000000000000..db03fb40b94cbe2b0b1e2b6d2f9631350e5960fd
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_32.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer32/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer32/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_36.sh b/dravid/tok_dravid/tok_files/tokenizer_36.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b9199328775d54544bc969ee1e4508cf76b43bb6
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_36.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer36/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer36/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_38.sh b/dravid/tok_dravid/tok_files/tokenizer_38.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a10305727c3865e38638df57862e53374314deed
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_38.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer38/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer38/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_48.sh b/dravid/tok_dravid/tok_files/tokenizer_48.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e33f0470cd0488f910586873bb2bb82c07b3c7df
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_48.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer48/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer48/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_50.sh b/dravid/tok_dravid/tok_files/tokenizer_50.sh
new file mode 100644
index 0000000000000000000000000000000000000000..77d8503ef5b123f81ee954533206bd0e4fd59e42
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_50.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer50/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer50/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_51.sh b/dravid/tok_dravid/tok_files/tokenizer_51.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1aee98696e0c35fe2cea8f08b18acb677b825d04
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_51.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer51/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitab \
+ --output-prefix $FINAL_DIR/tokenizer51/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_57.sh b/dravid/tok_dravid/tok_files/tokenizer_57.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0e6df397a6e2bf370a9a8d8bfdd0b33b6fd38f55
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_57.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer57/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitab \
+ --output-prefix $FINAL_DIR/tokenizer57/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
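NOTE: each wrapper rewrites `raw_content` to `text` in place before tokenizing because Megatron-DeepSpeed's `tools/preprocess_data.py` reads JSON-lines input and tokenizes the `text` key by default (its `--json-keys` option). One caveat: the global `sed` substitution also rewrites any occurrence of the literal string `raw_content` inside document bodies, not just the key. A spot check like the sketch below (illustrative only, not part of the patch) confirms the key layout of a split before running:

    head -n 1 /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaz_splitab \
      | python3 -c 'import json,sys; print(sorted(json.loads(sys.stdin.readline())))'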
diff --git a/dravid/tok_dravid/tok_files/tokenizer_58.sh b/dravid/tok_dravid/tok_files/tokenizer_58.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9ded1ebd8f1e95a9940b75f6282cf2b444d59403
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_58.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer58/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer58/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_59.sh b/dravid/tok_dravid/tok_files/tokenizer_59.sh
new file mode 100644
index 0000000000000000000000000000000000000000..58b485738a6a067e34765e8505a6bacca118fbfe
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_59.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer59/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitab \
+ --output-prefix $FINAL_DIR/tokenizer59/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_6.sh b/dravid/tok_dravid/tok_files/tokenizer_6.sh
new file mode 100644
index 0000000000000000000000000000000000000000..54d189ae9455cb0883eac1c91b68ccad658113b8
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_6.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer6/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer6/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_62.sh b/dravid/tok_dravid/tok_files/tokenizer_62.sh
new file mode 100644
index 0000000000000000000000000000000000000000..99a5d68262ca1a03c4bf7826789940a2bfc550a9
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_62.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer62/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer62/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_63.sh b/dravid/tok_dravid/tok_files/tokenizer_63.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8334131ec00bc9f449b1497c414836c91a188c60
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_63.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer63/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitab \
+ --output-prefix $FINAL_DIR/tokenizer63/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_69.sh b/dravid/tok_dravid/tok_files/tokenizer_69.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8c2b865822c9a833699cae3b20c2cd6be3fa975f
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_69.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbi_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer69/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbi_splitab \
+ --output-prefix $FINAL_DIR/tokenizer69/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_73.sh b/dravid/tok_dravid/tok_files/tokenizer_73.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b488094f45a5803b5304aaa2b935ccbcbd0660db
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_73.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbk_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer73/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbk_splitab \
+ --output-prefix $FINAL_DIR/tokenizer73/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_75.sh b/dravid/tok_dravid/tok_files/tokenizer_75.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1df26a6b880bc2f8cd5b5400f908f23a0fe9e620
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_75.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbl_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer75/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbl_splitab \
+ --output-prefix $FINAL_DIR/tokenizer75/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_76.sh b/dravid/tok_dravid/tok_files/tokenizer_76.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fe4b7b8ce11b0ee1a4fddc4d67ef7c51c890324e
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_76.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbm_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer76/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbm_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer76/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_78.sh b/dravid/tok_dravid/tok_files/tokenizer_78.sh
new file mode 100644
index 0000000000000000000000000000000000000000..75b353699056e3018d5907d422bd020289e319de
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_78.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer78/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer78/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_79.sh b/dravid/tok_dravid/tok_files/tokenizer_79.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e975259743455c6d80c774c73ee7f16ee80fd0e2
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_79.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer79/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitab \ + --output-prefix $FINAL_DIR/tokenizer79/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_85.sh b/dravid/tok_dravid/tok_files/tokenizer_85.sh new file mode 100644 index 0000000000000000000000000000000000000000..239124f9798dd91795433a4eff94133a6e6958aa --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_85.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbq_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer85/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbq_splitab \ + --output-prefix $FINAL_DIR/tokenizer85/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_86.sh b/dravid/tok_dravid/tok_files/tokenizer_86.sh new file mode 100644 index 0000000000000000000000000000000000000000..189e456c14c7d2cda2934f43fc95b54b68d34e97 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_86.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer86/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitaa \ + --output-prefix $FINAL_DIR/tokenizer86/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_87.sh b/dravid/tok_dravid/tok_files/tokenizer_87.sh new file mode 100644 index 0000000000000000000000000000000000000000..b9db987c96e4b1e77571c2feab12755e5a3e1dbf --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_87.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer87/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitab \ + --output-prefix $FINAL_DIR/tokenizer87/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_89.sh b/dravid/tok_dravid/tok_files/tokenizer_89.sh new file mode 100644 index 0000000000000000000000000000000000000000..43b91bc805eafa627116e4ed1aa4534487fa80b1 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_89.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbs_splitab +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer89/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbs_splitab \ + --output-prefix $FINAL_DIR/tokenizer89/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_9.sh b/dravid/tok_dravid/tok_files/tokenizer_9.sh new file mode 100644 index 0000000000000000000000000000000000000000..740f37903c9ee483b98fadf41fcf9bf36197a209 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_9.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalae_splitab +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer9/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalae_splitab \ + --output-prefix $FINAL_DIR/tokenizer9/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_90.sh
b/dravid/tok_dravid/tok_files/tokenizer_90.sh new file mode 100644 index 0000000000000000000000000000000000000000..124610495504dff5f3cf0d00dfd534139d8ec431 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_90.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitaa +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer90/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitaa \ + --output-prefix $FINAL_DIR/tokenizer90/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_91.sh b/dravid/tok_dravid/tok_files/tokenizer_91.sh new file mode 100644 index 0000000000000000000000000000000000000000..44c7fb3db8b38ad5934d99f42ac34d1a2ddfc6f9 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_91.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitab +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer91/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitab \ + --output-prefix $FINAL_DIR/tokenizer91/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_97.sh b/dravid/tok_dravid/tok_files/tokenizer_97.sh new file mode 100644 index 0000000000000000000000000000000000000000..ec62634cdc40e3b92a2bc49b31ca73212f6d8c31 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_97.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbw_splitab +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer97/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3
tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbw_splitab \ + --output-prefix $FINAL_DIR/tokenizer97/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_98.sh b/dravid/tok_dravid/tok_files/tokenizer_98.sh new file mode 100644 index 0000000000000000000000000000000000000000..e196abe78ad70c6c5e21131f6f41266028f1c24d --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_98.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbx_splitaa +echo "above deepspeed" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer98/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbx_splitaa \ + --output-prefix $FINAL_DIR/tokenizer98/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_111.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_111.yaml new file mode 100644 index 0000000000000000000000000000000000000000..380d4a8540b78f47382392064b245995ff59bc0f --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_111.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-111 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-111 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-111-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_111.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ +
--tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_113.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_113.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0664f942a221a017649c234125101167b3150f9a --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_113.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-113 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-113 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-113-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_113.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi 
+ memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_12.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_12.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d90f313cdbea61149e20e4f0fb171fbd3cba5d06 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_12.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-12 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-12 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-12-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_12.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_122.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_122.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8629603b71056567ceb0e183009fe1ba9b87c399 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_122.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-122 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-122 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 
+ template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-122-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_122.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_15.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_15.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45e2a88cb78f0d4f5a658d9e4ade5f350ea429a9 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_15.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-15 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-15 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-15-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_15.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + 
MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_17.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_17.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d272d70cd9895db05c7c1315225c0a84d29319ce --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_17.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-17 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-17 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-17-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_17.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: 
peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_18.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_18.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eeb6c24cf8fec0e284273b13310f8e3dfa442f28 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_18.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-18 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-18 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-18-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_18.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_19.yaml 
b/dravid/tok_dravid/tok_jobs/tokenizer_19.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef27bc1adad1ad555ea8418b0a13d3bc4e613cff --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_19.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-19 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-19 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-19-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_19.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_20.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_20.yaml new file mode 100644 index 0000000000000000000000000000000000000000..72ff40bf84c827710858ac37408aabfd04a16cbe --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_20.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-20 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-20 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + 
name: peacock-tokenizer-dravid-20-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_20.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_38.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_38.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4354239c9cd391c1aa4cd8a6eef0a03bfb39f196 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_38.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-38 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-38 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-38-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_38.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo 
"MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_4.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e08c283e70b3e28e492c588a24cfdd06d897824 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_4.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-4 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-4 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-4-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_4.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: 
vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_43.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_43.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cdc2b345e35eeb565c9e778bdbfb7e44584bc8ea --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_43.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-43 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-43 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-43-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_43.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_60.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_60.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc100d8e6ab34f717a559b02f0be75a3c4ea9836 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_60.yaml @@ -0,0 +1,104 @@ +apiVersion: 
kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-60 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-60 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-60-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_60.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_67.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_67.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c93c34007168cbbd8d1fb554f83d2e0cca49db4 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_67.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-67 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-67 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-67-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export 
WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_67.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_69.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_69.yaml new file mode 100644 index 0000000000000000000000000000000000000000..685e9446dc3e28ee959dd1494698a46c3d0ac5f5 --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_69.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-69 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-69 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-69-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_69.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np 
$N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + requests: + habana.ai/gaudi: 8 + hugepages-2Mi: 300Gi + memory: 700Gi + cpu: 150 + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_77.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_77.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0167dc3677c88da3f09d887bee87d62b182fef5e --- /dev/null +++ b/dravid/tok_dravid/tok_jobs/tokenizer_77.yaml @@ -0,0 +1,104 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: peacock-tokenizer-dravid-77 + namespace: peacock + labels: + app: peacock-tokenizer-dravid-77 +spec: + slotsPerWorker: 8 + runPolicy: + backoffLimit: 1 + cleanPodPolicy: Running + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + hostIPC: true + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-tokenizer-dravid-77-launcher + imagePullPolicy: IfNotPresent + volumeMounts: + - name: work-dir + mountPath: /mnt/weka/peacock + command: ["/bin/bash", "-c"] + args: + - >- + + export SYNAPSE_VERSION="1.15.1"; + export WORKER_DIR="/mnt/weka/peacock/experiments/llama"; + + export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh" + export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_77.sh + HOSTSFILE=$OMPI_MCA_orte_default_hostfile; + echo "HOSTSFILE=$HOSTSFILE"; + MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)"; + NUM_NODES=$(wc -l < $HOSTSFILE); + CARDS_PER_NODE=8; + N_CARDS=$((NUM_NODES*CARDS_PER_NODE)); + echo "MPI_ROOT=$MPI_ROOT"; + echo "N_CARDS=$N_CARDS"; + echo "MASTER_ADDR=$MASTER_ADDR"; + sleep 20; + + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD; + + mpirun -np $N_CARDS -npernode 8 \ + --tag-output \ + --allow-run-as-root \ + --prefix $MPI_ROOT \ + -x WORKER_DIR=$WORKER_DIR \ + -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD; + + + Worker: + replicas: 1 + template: + spec: + volumes: + - name: work-dir + persistentVolumeClaim: + claimName: peacock-fs-pvc + tolerations: + - key: "habana.ai/gaudi" + operator: "Exists" + effect: "NoSchedule" + - key: "k8s/namespace" + operator: "Equal" + value: "peacock" + effect: "NoSchedule" + hostIPC: true + containers: + - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest + name: peacock-llama-worker + imagePullPolicy: IfNotPresent + securityContext: + capabilities: + add: + - SYSLOG + resources: + limits: + habana.ai/gaudi: 8 + 
hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_79.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_79.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..98922ff391f51375c9af1bd4f502090f5d5762c6
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_79.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-79
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-79
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-79-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_79.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_82.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_82.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..563e39066be7df79536503d5292eabd40cc1771a
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_82.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-82
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-82
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-82-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_82.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_89.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_89.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b64293351bd2e4ba2b045a71a308618e56ac6521
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_89.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-89
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-89
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-89-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_89.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_94.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_94.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..265d8601c170e26406d980517cfd9b02b3515c03
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_94.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-94
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-94
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-94-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_94.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_95.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_95.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2cfb2eca0110044857a3f6d2b02714bafe5a6d7
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_95.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-95
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-95
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-95-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_95.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_97.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_97.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..226ac7fb814be465fcf50dfd2d8d457ab3d20863
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_97.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-97
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-97
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-97-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_97.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
diff --git a/dravid/tok_dravid/tok_jobs/tokenizer_98.yaml b/dravid/tok_dravid/tok_jobs/tokenizer_98.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6a08280a9c269f0ff283bc1149c803ce460d80b
--- /dev/null
+++ b/dravid/tok_dravid/tok_jobs/tokenizer_98.yaml
@@ -0,0 +1,104 @@
+apiVersion: kubeflow.org/v2beta1
+kind: MPIJob
+metadata:
+  name: peacock-tokenizer-dravid-98
+  namespace: peacock
+  labels:
+    app: peacock-tokenizer-dravid-98
+spec:
+  slotsPerWorker: 8
+  runPolicy:
+    backoffLimit: 1
+    cleanPodPolicy: Running
+  mpiReplicaSpecs:
+    Launcher:
+      replicas: 1
+      template:
+        spec:
+          hostIPC: true
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-tokenizer-dravid-98-launcher
+            imagePullPolicy: IfNotPresent
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
+            command: ["/bin/bash", "-c"]
+            args:
+            - >-
+
+              export SYNAPSE_VERSION="1.15.1";
+              export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
+
+              export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh";
+              export TOKENIZER_CMD=/mnt/weka/peacock/idc/datasets/dravid//tok_dravid/tok_files/tokenizer_98.sh;
+              HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
+              echo "HOSTSFILE=$HOSTSFILE";
+              MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n 's/[[:space:]]slots.*//p')";
+              NUM_NODES=$(wc -l < $HOSTSFILE);
+              CARDS_PER_NODE=8;
+              N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
+              echo "MPI_ROOT=$MPI_ROOT";
+              echo "N_CARDS=$N_CARDS";
+              echo "MASTER_ADDR=$MASTER_ADDR";
+              sleep 20;
+
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
+
+              mpirun -np $N_CARDS -npernode 8 \
+                --tag-output \
+                --allow-run-as-root \
+                --prefix $MPI_ROOT \
+                -x WORKER_DIR=$WORKER_DIR \
+                -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
+
+
+    Worker:
+      replicas: 1
+      template:
+        spec:
+          volumes:
+          - name: work-dir
+            persistentVolumeClaim:
+              claimName: peacock-fs-pvc
+          tolerations:
+          - key: "habana.ai/gaudi"
+            operator: "Exists"
+            effect: "NoSchedule"
+          - key: "k8s/namespace"
+            operator: "Equal"
+            value: "peacock"
+            effect: "NoSchedule"
+          hostIPC: true
+          containers:
+          - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
+            name: peacock-llama-worker
+            imagePullPolicy: IfNotPresent
+            securityContext:
+              capabilities:
+                add:
+                - SYSLOG
+            resources:
+              limits:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+              requests:
+                habana.ai/gaudi: 8
+                hugepages-2Mi: 300Gi
+                memory: 700Gi
+                cpu: 150
+            volumeMounts:
+            - name: work-dir
+              mountPath: /mnt/weka/peacock
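The manifests above are identical except for the job index (79, 82, 89, 94, 95, 97, 98), which appears in the file name, the metadata name and label, the launcher container name, and the TOKENIZER_CMD path. A generator along the following lines could plausibly have produced them; this is a sketch only, and the template file name, the NN placeholder convention, and the working directory are assumptions for illustration, not files recorded in this diff.

#!/bin/bash
# Hypothetical generator for the numbered MPIJob manifests above (a sketch,
# not part of this diff). Assumes tokenizer_TEMPLATE.yaml is a copy of one
# manifest with every job-specific index replaced by the literal token NN.
set -euo pipefail

mkdir -p dravid/tok_dravid/tok_jobs
for NN in 79 82 89 94 95 97 98; do
  # Substitute the placeholder everywhere it occurs: metadata.name,
  # the app label, the launcher container name, and the TOKENIZER_CMD path.
  sed "s/NN/${NN}/g" tokenizer_TEMPLATE.yaml \
    > "dravid/tok_dravid/tok_jobs/tokenizer_${NN}.yaml"
done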