diff --git a/dravid/tok_dravid/tok_files/tokenizer_0.sh b/dravid/tok_dravid/tok_files/tokenizer_0.sh new file mode 100644 index 0000000000000000000000000000000000000000..01537517d318759e42cf75832f0d5f590c2848de --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_0.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer0/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer0/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_1.sh b/dravid/tok_dravid/tok_files/tokenizer_1.sh new file mode 100644 index 0000000000000000000000000000000000000000..d2e78d2a35eeec990d2c5fd65b8c09f0bd8a1965 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_1.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer1/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer1/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_10.sh b/dravid/tok_dravid/tok_files/tokenizer_10.sh new file mode 100644 index 0000000000000000000000000000000000000000..cb9cd0ebca3b29452f7ef1301b106858695176eb --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_10.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer10/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed 
+echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitaa \ + --output-prefix $FINAL_DIR/tokenizer10/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_100.sh b/dravid/tok_dravid/tok_files/tokenizer_100.sh new file mode 100644 index 0000000000000000000000000000000000000000..0d0bde1332d56511876aac3f92b210cf7bd7f255 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_100.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer100/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitaa \ + --output-prefix $FINAL_DIR/tokenizer100/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_101.sh b/dravid/tok_dravid/tok_files/tokenizer_101.sh new file mode 100644 index 0000000000000000000000000000000000000000..2778378cbfc3ebbfb5b74f2857ecf6dcfb398522 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_101.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer101/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitab \ + --output-prefix $FINAL_DIR/tokenizer101/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_102.sh b/dravid/tok_dravid/tok_files/tokenizer_102.sh new file mode 100644 index 0000000000000000000000000000000000000000..b22749318dd4d6e7607449764240c56a8f678dcc --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_102.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitaa +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer102/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitaa \ + --output-prefix $FINAL_DIR/tokenizer102/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_105.sh b/dravid/tok_dravid/tok_files/tokenizer_105.sh new file mode 100644 index 0000000000000000000000000000000000000000..e697bb3431a787128cd96df3296f149bc7dc06e9 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_105.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer105/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitab \ + --output-prefix $FINAL_DIR/tokenizer105/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_108.sh b/dravid/tok_dravid/tok_files/tokenizer_108.sh new file mode 100644 index 0000000000000000000000000000000000000000..b6878f8e51928817f7c2ab941b58629b2bb532d2 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_108.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer108/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitaa \ + --output-prefix $FINAL_DIR/tokenizer108/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git 
a/dravid/tok_dravid/tok_files/tokenizer_110.sh b/dravid/tok_dravid/tok_files/tokenizer_110.sh new file mode 100644 index 0000000000000000000000000000000000000000..d19a2fcc7d3f8db6d67121c8c7bcaff0af2906b4 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_110.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer110/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa \ + --output-prefix $FINAL_DIR/tokenizer110/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_111.sh b/dravid/tok_dravid/tok_files/tokenizer_111.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f152ceadc6caa0d50c4f223071db86d4bb4011e --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_111.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer111/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitab \ + --output-prefix $FINAL_DIR/tokenizer111/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_114.sh b/dravid/tok_dravid/tok_files/tokenizer_114.sh new file mode 100644 index 0000000000000000000000000000000000000000..fe53e5a514dffd19c7e3028f10927a09c78baab4 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_114.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer114/ +cd 
/mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitaa \ + --output-prefix $FINAL_DIR/tokenizer114/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_117.sh b/dravid/tok_dravid/tok_files/tokenizer_117.sh new file mode 100644 index 0000000000000000000000000000000000000000..48bb9762c0e9bb7d6aae3fcaf8dc7f62ccbedb76 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_117.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer117/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitab \ + --output-prefix $FINAL_DIR/tokenizer117/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_118.sh b/dravid/tok_dravid/tok_files/tokenizer_118.sh new file mode 100644 index 0000000000000000000000000000000000000000..3632533f7cf70b36b4ace63aa61a9d06ef19e4c4 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_118.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer118/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitaa \ + --output-prefix $FINAL_DIR/tokenizer118/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_12.sh b/dravid/tok_dravid/tok_files/tokenizer_12.sh new file mode 100644 index 0000000000000000000000000000000000000000..607a03ce9b84a5804b46a0a4d6d2a3f22cce0327 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_12.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitaa +echo 
"above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer12/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitaa \ + --output-prefix $FINAL_DIR/tokenizer12/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_121.sh b/dravid/tok_dravid/tok_files/tokenizer_121.sh new file mode 100644 index 0000000000000000000000000000000000000000..f696d7bb2cc39b9f095c4172f39f27b14ea8cac6 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_121.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer121/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitab \ + --output-prefix $FINAL_DIR/tokenizer121/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_122.sh b/dravid/tok_dravid/tok_files/tokenizer_122.sh new file mode 100644 index 0000000000000000000000000000000000000000..1a05a5e201d33e3617b732f71e0796a2da8cbdab --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_122.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer122/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitaa \ + --output-prefix $FINAL_DIR/tokenizer122/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ 
+ diff --git a/dravid/tok_dravid/tok_files/tokenizer_123.sh b/dravid/tok_dravid/tok_files/tokenizer_123.sh new file mode 100644 index 0000000000000000000000000000000000000000..788914386a002431c0b14a633f770d9125eca2d2 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_123.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer123/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitab \ + --output-prefix $FINAL_DIR/tokenizer123/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_124.sh b/dravid/tok_dravid/tok_files/tokenizer_124.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c8f5d689d6c979436ac338464a945ebbdf0e353 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_124.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer124/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitaa \ + --output-prefix $FINAL_DIR/tokenizer124/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_14.sh b/dravid/tok_dravid/tok_files/tokenizer_14.sh new file mode 100644 index 0000000000000000000000000000000000000000..7beb7956ee2078d65f2ce5acc2d9df8540b522d2 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_14.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer14/ +cd 
/mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitaa \ + --output-prefix $FINAL_DIR/tokenizer14/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_18.sh b/dravid/tok_dravid/tok_files/tokenizer_18.sh new file mode 100644 index 0000000000000000000000000000000000000000..606b4d8a1ba3b5d5ec4ce687136c81a03e6f21e9 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_18.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer18/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitaa \ + --output-prefix $FINAL_DIR/tokenizer18/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_19.sh b/dravid/tok_dravid/tok_files/tokenizer_19.sh new file mode 100644 index 0000000000000000000000000000000000000000..93b324b80665f60b6e1752331a11db1b881ec5bd --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_19.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer19/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitab \ + --output-prefix $FINAL_DIR/tokenizer19/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_2.sh b/dravid/tok_dravid/tok_files/tokenizer_2.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a2a71d67aee5cdefdeadc870f94a257cbb1150c --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_2.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitaa +echo "above 
deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer2/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer2/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_22.sh b/dravid/tok_dravid/tok_files/tokenizer_22.sh new file mode 100644 index 0000000000000000000000000000000000000000..dffde4575c2fb327d363424d14755b6e29f8fefa --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_22.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer22/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitaa \ + --output-prefix $FINAL_DIR/tokenizer22/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_24.sh b/dravid/tok_dravid/tok_files/tokenizer_24.sh new file mode 100644 index 0000000000000000000000000000000000000000..2cc1c3e05c44bac1f9f5eb64cf7d79fd72f06b4d --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_24.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer24/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitaa \ + --output-prefix $FINAL_DIR/tokenizer24/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git 
a/dravid/tok_dravid/tok_files/tokenizer_27.sh b/dravid/tok_dravid/tok_files/tokenizer_27.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9f9cf2451efb173427d8d574fef499577a010f7 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_27.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer27/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitab \ + --output-prefix $FINAL_DIR/tokenizer27/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_28.sh b/dravid/tok_dravid/tok_files/tokenizer_28.sh new file mode 100644 index 0000000000000000000000000000000000000000..de1a5a7ca7f131346dc01f189f4432986e84e55c --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_28.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer28/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitaa \ + --output-prefix $FINAL_DIR/tokenizer28/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_3.sh b/dravid/tok_dravid/tok_files/tokenizer_3.sh new file mode 100644 index 0000000000000000000000000000000000000000..f9c81578b8e1758552b85168119d66514952b028 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_3.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer3/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo 
"inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitab \ + --output-prefix $FINAL_DIR/tokenizer3/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_30.sh b/dravid/tok_dravid/tok_files/tokenizer_30.sh new file mode 100644 index 0000000000000000000000000000000000000000..db3f252725fe0b093f22c2368423900514cd474e --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_30.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer30/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitaa \ + --output-prefix $FINAL_DIR/tokenizer30/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_33.sh b/dravid/tok_dravid/tok_files/tokenizer_33.sh new file mode 100644 index 0000000000000000000000000000000000000000..9bddcf76152647580d9620abe7d29e1536ee798f --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_33.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer33/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitab \ + --output-prefix $FINAL_DIR/tokenizer33/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_34.sh b/dravid/tok_dravid/tok_files/tokenizer_34.sh new file mode 100644 index 0000000000000000000000000000000000000000..a01949cd06d5e1bc15ef67a71f72e01cd129a5e7 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_34.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer34/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitaa \ + --output-prefix $FINAL_DIR/tokenizer34/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_35.sh b/dravid/tok_dravid/tok_files/tokenizer_35.sh new file mode 100644 index 0000000000000000000000000000000000000000..8da8450b3ba84fa00e919a7fdd1c1eb9f32b6604 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_35.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer35/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitab \ + --output-prefix $FINAL_DIR/tokenizer35/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_37.sh b/dravid/tok_dravid/tok_files/tokenizer_37.sh new file mode 100644 index 0000000000000000000000000000000000000000..776c59e3b6d8fba66555afc0ec85e8788e98b6ca --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_37.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer37/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitab \ + --output-prefix $FINAL_DIR/tokenizer37/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_39.sh 
b/dravid/tok_dravid/tok_files/tokenizer_39.sh new file mode 100644 index 0000000000000000000000000000000000000000..07896bc82b3fe636c2a2bb24c1c5bc4eb0861eac --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_39.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer39/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitab \ + --output-prefix $FINAL_DIR/tokenizer39/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_4.sh b/dravid/tok_dravid/tok_files/tokenizer_4.sh new file mode 100644 index 0000000000000000000000000000000000000000..903daf221f4b956406364eacc0d1dafa1c839916 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_4.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer4/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer4/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_40.sh b/dravid/tok_dravid/tok_files/tokenizer_40.sh new file mode 100644 index 0000000000000000000000000000000000000000..c1baf667a8442093fd4915db40951f15ee152b5c --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_40.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer40/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 
tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitaa \ + --output-prefix $FINAL_DIR/tokenizer40/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_41.sh b/dravid/tok_dravid/tok_files/tokenizer_41.sh new file mode 100644 index 0000000000000000000000000000000000000000..b0e04d480fad681135f8bbc38ed890a807a994cb --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_41.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer41/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitab \ + --output-prefix $FINAL_DIR/tokenizer41/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_42.sh b/dravid/tok_dravid/tok_files/tokenizer_42.sh new file mode 100644 index 0000000000000000000000000000000000000000..4cd5f5782c9bde0544c8a3a53233e4bfd6055dc6 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_42.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer42/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitaa \ + --output-prefix $FINAL_DIR/tokenizer42/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_43.sh b/dravid/tok_dravid/tok_files/tokenizer_43.sh new file mode 100644 index 0000000000000000000000000000000000000000..e51e4c71bef3a4c9a55afac56d706dafb7ec376c --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_43.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer43/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitab \ + --output-prefix $FINAL_DIR/tokenizer43/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_44.sh b/dravid/tok_dravid/tok_files/tokenizer_44.sh new file mode 100644 index 0000000000000000000000000000000000000000..ccd59800c36f69a287b82aee7a0ade2ca67d60e4 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_44.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer44/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitaa \ + --output-prefix $FINAL_DIR/tokenizer44/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_45.sh b/dravid/tok_dravid/tok_files/tokenizer_45.sh new file mode 100644 index 0000000000000000000000000000000000000000..860e449c3bcacc9b6f63b6e5b17fffdfd12c9f97 --- /dev/null +++ b/dravid/tok_dravid/tok_files/tokenizer_45.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/ +TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model +VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer45/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitab \ + --output-prefix $FINAL_DIR/tokenizer45/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/dravid/tok_dravid/tok_files/tokenizer_46.sh 
b/dravid/tok_dravid/tok_files/tokenizer_46.sh
new file mode 100644
index 0000000000000000000000000000000000000000..361d695ae33349d1d9f22619581c20acf8680c59
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_46.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer46/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer46/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_47.sh b/dravid/tok_dravid/tok_files/tokenizer_47.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4faaf553880fdba738f873728403943c41024b71
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_47.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer47/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitab \
+    --output-prefix $FINAL_DIR/tokenizer47/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_49.sh b/dravid/tok_dravid/tok_files/tokenizer_49.sh
new file mode 100644
index 0000000000000000000000000000000000000000..23ae06813bf368cc52356a5d70475788c772c38a
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_49.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalay_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer49/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalay_splitab \
+    --output-prefix $FINAL_DIR/tokenizer49/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_5.sh b/dravid/tok_dravid/tok_files/tokenizer_5.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3ac0dc528a839a7ee9fc3b22ded121e8574057d0
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_5.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer5/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitab \
+    --output-prefix $FINAL_DIR/tokenizer5/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_52.sh b/dravid/tok_dravid/tok_files/tokenizer_52.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bee8a2593e5653822b92e589156432a7432d046e
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_52.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalba_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer52/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalba_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer52/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_53.sh b/dravid/tok_dravid/tok_files/tokenizer_53.sh
new file mode 100644
index 0000000000000000000000000000000000000000..145983ee89207912626dfba696b3b002c5ea4fd7
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_53.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalba_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer53/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalba_splitab \
+    --output-prefix $FINAL_DIR/tokenizer53/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_54.sh b/dravid/tok_dravid/tok_files/tokenizer_54.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bada92811fe5d1e44c648362fae32c5a258b3da5
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_54.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbb_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer54/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbb_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer54/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_55.sh b/dravid/tok_dravid/tok_files/tokenizer_55.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1adaa6cd9f99f82d7ce1f488eead69db1b0d7430
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_55.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbb_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer55/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbb_splitab \
+    --output-prefix $FINAL_DIR/tokenizer55/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
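Note: the scripts in this patch are identical except for three fields (the shard file name, the script number, and the output directory), so they read as the output of a small template generator. The sketch below shows one hedged way such a generator could look; the script name make_tok_scripts.sh, the sorted-shard loop, and the sequential numbering are illustrative assumptions, not part of this patch (the committed scripts skip some indices).

#!/bin/bash
# make_tok_scripts.sh -- hypothetical generator for the per-shard tokenizer scripts.
# Assumes the split shards live flat under $SPLIT_DIR.
SPLIT_DIR=/mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split
OUT=dravid/tok_dravid/tok_files
mkdir -p "$OUT"
n=0
for shard in $(ls "$SPLIT_DIR" | sort); do
  # Quoted heredoc: $-variables stay literal in the generated script;
  # only the @SHARD@/@N@ placeholders are substituted by sed.
  sed -e "s|@SHARD@|$shard|g" -e "s|@N@|$n|g" > "$OUT/tokenizer_$n.sh" <<'EOF'
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/@SHARD@
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer@N@/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/@SHARD@ \
    --output-prefix $FINAL_DIR/tokenizer@N@/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8
EOF
  chmod +x "$OUT/tokenizer_$n.sh"
  n=$((n + 1))
done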
diff --git a/dravid/tok_dravid/tok_files/tokenizer_56.sh b/dravid/tok_dravid/tok_files/tokenizer_56.sh
new file mode 100644
index 0000000000000000000000000000000000000000..40403ae59cd948fd5635e506559e883c788bad6e
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_56.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbc_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer56/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbc_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer56/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_60.sh b/dravid/tok_dravid/tok_files/tokenizer_60.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fe1e3b3d7636198407249f062ec7903047cd4af0
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_60.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbe_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer60/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbe_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer60/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_61.sh b/dravid/tok_dravid/tok_files/tokenizer_61.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1616be176df3bf1c8be15d436f482b373f37079a
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_61.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbe_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer61/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbe_splitab \
+    --output-prefix $FINAL_DIR/tokenizer61/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_64.sh b/dravid/tok_dravid/tok_files/tokenizer_64.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1bca289bd8d3aca226c43f6e9e3141cb6f405dd3
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_64.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbg_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer64/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbg_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer64/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_65.sh b/dravid/tok_dravid/tok_files/tokenizer_65.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2d6645c28ed8d3613a8be6cf32accf1336da1ff0
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_65.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbg_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer65/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbg_splitab \
+    --output-prefix $FINAL_DIR/tokenizer65/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_66.sh b/dravid/tok_dravid/tok_files/tokenizer_66.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9af601c52756ff3317943e5aa7f4f33fb7a38113
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_66.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbh_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer66/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbh_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer66/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_67.sh b/dravid/tok_dravid/tok_files/tokenizer_67.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5771f535cbe9ef2930da67a2b303cfbf8ef921e7
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_67.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbh_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer67/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbh_splitab \
+    --output-prefix $FINAL_DIR/tokenizer67/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_68.sh b/dravid/tok_dravid/tok_files/tokenizer_68.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4f3b35eec1fa23e39e3d41546da3b65738f43b9b
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_68.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbi_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer68/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbi_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer68/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_7.sh b/dravid/tok_dravid/tok_files/tokenizer_7.sh
new file mode 100644
index 0000000000000000000000000000000000000000..caf8ac268e0a20f1b4cad86562313d0df06285cd
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_7.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalad_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer7/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalad_splitab \
+    --output-prefix $FINAL_DIR/tokenizer7/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_70.sh b/dravid/tok_dravid/tok_files/tokenizer_70.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2504642a61db41c634343bcc0df066612ff4f5f2
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_70.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbj_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer70/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbj_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer70/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_71.sh b/dravid/tok_dravid/tok_files/tokenizer_71.sh
new file mode 100644
index 0000000000000000000000000000000000000000..da3e784420e8a12291ce3e0f66afb8df3bd99c95
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_71.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbj_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer71/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbj_splitab \
+    --output-prefix $FINAL_DIR/tokenizer71/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_72.sh b/dravid/tok_dravid/tok_files/tokenizer_72.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fd1223e754bf77d5e47020e6aecd07c21437f0ae
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_72.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbk_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer72/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbk_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer72/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_74.sh b/dravid/tok_dravid/tok_files/tokenizer_74.sh
new file mode 100644
index 0000000000000000000000000000000000000000..684504eaf6520dd85f1f9811d91e266ada0851ab
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_74.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbl_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer74/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbl_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer74/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_77.sh b/dravid/tok_dravid/tok_files/tokenizer_77.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5a4936d095fc6527e94cc59db6447b9ab3b4be03
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_77.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbm_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer77/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbm_splitab \
+    --output-prefix $FINAL_DIR/tokenizer77/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_8.sh b/dravid/tok_dravid/tok_files/tokenizer_8.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5b6842671ce9cc24f8b2f5b6dc6b18c5f50de7e6
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_8.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalae_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer8/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalae_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer8/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_80.sh b/dravid/tok_dravid/tok_files/tokenizer_80.sh
new file mode 100644
index 0000000000000000000000000000000000000000..493d096d2d14617ff30e89263def0761f9838c0b
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_80.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbo_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer80/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbo_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer80/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
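Each script pins exactly one shard to one preprocess_data.py invocation, so the whole set is embarrassingly parallel; note that set -m only enables job control inside a script and does not parallelize anything by itself. A hedged launcher sketch follows; the name run_tok_batches.sh and the batch size are assumptions (each job already runs --workers 8 internally, so BATCH should stay small on a single node).

#!/bin/bash
# run_tok_batches.sh -- hypothetical driver for the generated shard scripts.
# Requires bash >= 4.3 for `wait -n`.
BATCH=4
for script in dravid/tok_dravid/tok_files/tokenizer_*.sh; do
  bash "$script" &
  # Throttle: whenever $BATCH jobs are running, wait for one to finish.
  while [ "$(jobs -r | wc -l)" -ge "$BATCH" ]; do
    wait -n
  done
done
wait  # let the final batch drain before exiting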
diff --git a/dravid/tok_dravid/tok_files/tokenizer_81.sh b/dravid/tok_dravid/tok_files/tokenizer_81.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9eb4dcc64678914aed980e46abe69b55c065fc84
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_81.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbo_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer81/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbo_splitab \
+    --output-prefix $FINAL_DIR/tokenizer81/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_82.sh b/dravid/tok_dravid/tok_files/tokenizer_82.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c7e99d36a107d82f7d7fbf8c0ceabb99f6f531f7
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_82.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbp_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer82/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbp_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer82/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_83.sh b/dravid/tok_dravid/tok_files/tokenizer_83.sh
new file mode 100644
index 0000000000000000000000000000000000000000..116e015f28e6e6f9e840a8e9bb34f74b48d22e8c
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_83.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbp_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer83/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbp_splitab \
+    --output-prefix $FINAL_DIR/tokenizer83/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_84.sh b/dravid/tok_dravid/tok_files/tokenizer_84.sh
new file mode 100644
index 0000000000000000000000000000000000000000..652d2b01eda7adf35a7849a9cd333579568ed39b
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_84.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbq_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer84/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbq_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer84/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_88.sh b/dravid/tok_dravid/tok_files/tokenizer_88.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fb2e9bdea0c594ac2cd1bc9a5cf5d385658be28f
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_88.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbs_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer88/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbs_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer88/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_92.sh b/dravid/tok_dravid/tok_files/tokenizer_92.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2c7d7fc19eea1900805b01a647e8c12f3989db56
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_92.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbu_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer92/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbu_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer92/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_93.sh b/dravid/tok_dravid/tok_files/tokenizer_93.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c7976d2d7d2e4f62a1aafc678efea85927c7b0eb
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_93.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbu_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer93/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbu_splitab \
+    --output-prefix $FINAL_DIR/tokenizer93/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_94.sh b/dravid/tok_dravid/tok_files/tokenizer_94.sh
new file mode 100644
index 0000000000000000000000000000000000000000..189dd1dbd1904d6f02102b80fd491e6423a8f398
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_94.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbv_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer94/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbv_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer94/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_95.sh b/dravid/tok_dravid/tok_files/tokenizer_95.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f56f838e8f4f3693041c035b38f27493b87c6525
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_95.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbv_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer95/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbv_splitab \
+    --output-prefix $FINAL_DIR/tokenizer95/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_96.sh b/dravid/tok_dravid/tok_files/tokenizer_96.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c61db7716b0d409bba7f35ab3f7b0b69a3299beb
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_96.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbw_splitaa
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer96/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbw_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer96/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
diff --git a/dravid/tok_dravid/tok_files/tokenizer_99.sh b/dravid/tok_dravid/tok_files/tokenizer_99.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bde2d8ffc7635938079db5f18fdc7670e14240a4
--- /dev/null
+++ b/dravid/tok_dravid/tok_files/tokenizer_99.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbx_splitab
+echo "above deepspeed"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer99/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbx_splitab \
+    --output-prefix $FINAL_DIR/tokenizer99/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+    # --merge-file $MERGES_FILE \
+
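Once the shard jobs finish, each tokenizerN/ output prefix should hold an indexed-dataset .bin/.idx pair written by tools/preprocess_data.py; because every --output-prefix ends in a slash and the sed step rewrites the JSON key to "text", the files land inside each tokenizerN/ directory. The exact file names depend on the Megatron-DeepSpeed version, so the glob below is an assumption; check_tok_outputs.sh itself is a hypothetical helper, not part of this patch.

#!/bin/bash
# check_tok_outputs.sh -- hypothetical sanity check over the shard output prefixes.
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid
for d in "$FINAL_DIR"/tokenizer*/; do
  bins=$(ls "$d"*.bin 2>/dev/null | wc -l)
  idxs=$(ls "$d"*.idx 2>/dev/null | wc -l)
  # Flag any shard directory that is missing either half of the pair.
  if [ "$bins" -eq 0 ] || [ "$idxs" -eq 0 ]; then
    echo "MISSING OUTPUT: $d (bin=$bins idx=$idxs)"
  fi
done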