diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_118.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_118.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef35a1628850d7c80b2a4fbb428efc5a7019f0be --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_118.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer118/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer118/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_123.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_123.sh new file mode 100644 index 0000000000000000000000000000000000000000..5319a4666634ce46307239d3f15627a01c45cdc9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_123.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer123/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaf \ + --output-prefix $FINAL_DIR/tokenizer123/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_124.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_124.sh new file mode 100644 index 0000000000000000000000000000000000000000..fbc8ead34cda25a3156adceb58f66017bf5f6d32 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_124.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer124/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd 
+python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitaa \ + --output-prefix $FINAL_DIR/tokenizer124/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_132.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_132.sh new file mode 100644 index 0000000000000000000000000000000000000000..734240a26205aaf2bc78c518135c94e18fb1a7fc --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_132.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer132/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer132/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_135.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_135.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d370699231e8eed9ec8785e6f9fa42fa07f11ae --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_135.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer135/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer135/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_141.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_141.sh new file mode 100644 index 0000000000000000000000000000000000000000..99218735cb3d56083e204d69ea8df91bb26fc5d8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_141.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitab +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer141/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer141/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_145.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_145.sh new file mode 100644 index 0000000000000000000000000000000000000000..fd890ce63889107bea7ee2cc7714640e63587782 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_145.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer145/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitaf \ + --output-prefix $FINAL_DIR/tokenizer145/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_192.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_192.sh new file mode 100644 index 0000000000000000000000000000000000000000..89b7071d35f85ee1136ac07389ba3f056fe220d3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_192.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer192/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer192/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_205.sh 
b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_205.sh new file mode 100644 index 0000000000000000000000000000000000000000..60cfd4b415d8dd36d07b9cbb1d21f14806ce6b1b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_205.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer205/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer205/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_229.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_229.sh new file mode 100644 index 0000000000000000000000000000000000000000..04fcfcca622e6cd190de686ba90ed524f71b580a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_229.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer229/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaf \ + --output-prefix $FINAL_DIR/tokenizer229/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_233.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_233.sh new file mode 100644 index 0000000000000000000000000000000000000000..50b5b8e542a2a1558686e2d18a01dd8ee314297a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_233.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer233/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer233/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_246.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_246.sh new file mode 100644 index 0000000000000000000000000000000000000000..d49d4126ddc48d34235085548e10f8679ce55589 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_246.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer246/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitae \ + --output-prefix $FINAL_DIR/tokenizer246/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_248.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_248.sh new file mode 100644 index 0000000000000000000000000000000000000000..24736860df9713dfc172ba2f561bf89aee6ced13 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_248.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer248/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitaa \ + --output-prefix $FINAL_DIR/tokenizer248/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_252.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_252.sh new file mode 100644 index 0000000000000000000000000000000000000000..46d81dabe07b71285987e1e905b1fda1e4d93831 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_252.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M 
+#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer252/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer252/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_257.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_257.sh new file mode 100644 index 0000000000000000000000000000000000000000..16222bf9d6da15a5413ad3b44f158dd0172981d2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_257.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer257/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer257/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_266.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_266.sh new file mode 100644 index 0000000000000000000000000000000000000000..819779043b601ae22336077b1894c3c7f07d0003 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_266.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer266/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitac \ + --output-prefix $FINAL_DIR/tokenizer266/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_272.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_272.sh new file mode 100644 index 
0000000000000000000000000000000000000000..e22afb40d3a666d7b69f49cc27460de149bb1104 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_272.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer272/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer272/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_296.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_296.sh new file mode 100644 index 0000000000000000000000000000000000000000..079e6e037e30c7747758e4155a1fdb1bd8892eb2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_296.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer296/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer296/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_303.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_303.sh new file mode 100644 index 0000000000000000000000000000000000000000..5a81c72b9b4d1aba85afbf410cc1a71a1342230b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_303.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer303/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer303/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_320.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_320.sh new file mode 100644 index 0000000000000000000000000000000000000000..5e003ab463d8b287a8894ac5b83c8a6d462b4416 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_320.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer320/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer320/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_322.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_322.sh new file mode 100644 index 0000000000000000000000000000000000000000..28493ee38d7dc9ef65b0cf91cd5c9007a6f00e6a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_322.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer322/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer322/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_323.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_323.sh new file mode 100644 index 0000000000000000000000000000000000000000..04936c4d4cbf0793c6c4317c4c0c8385cc85c9bf --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_323.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer323/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer323/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_384.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_384.sh new file mode 100644 index 0000000000000000000000000000000000000000..574230d1d3ed889dcdda557a3f3177953cd46d90 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_384.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer384/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer384/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_385.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_385.sh new file mode 100644 index 0000000000000000000000000000000000000000..16126082af30d65f64f10ab15d7477d5d2c624a5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_385.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer385/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer385/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_40.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_40.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..24e7ecacf61113a93fc802474de4fc7b4f34d4ba --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_40.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer40/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer40/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_407.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_407.sh new file mode 100644 index 0000000000000000000000000000000000000000..68a0ba47bd1e454da0be40d019bd66f7c2894cbe --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_407.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer407/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitab \ + --output-prefix $FINAL_DIR/tokenizer407/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_422.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_422.sh new file mode 100644 index 0000000000000000000000000000000000000000..1e3efc0e79c0876ffc794072b5159ea63b8aa007 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_422.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer422/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer422/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_429.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_429.sh new file mode 100644 index 0000000000000000000000000000000000000000..f816248d33f5c808ce80a241aa46386bf940a4c8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_429.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer429/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer429/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_433.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_433.sh new file mode 100644 index 0000000000000000000000000000000000000000..1bcfce5cc656c201959508e496ddf264009bb6f6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_433.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer433/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer433/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_434.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_434.sh new file mode 100644 index 0000000000000000000000000000000000000000..0203d4e7451cb6974ed85ce86e676d44bb6260ee --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_434.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer434/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer434/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_443.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_443.sh new file mode 100644 index 0000000000000000000000000000000000000000..f08df6ddf61d8e864f63e2712405e0519829eb19 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_443.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer443/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer443/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_447.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_447.sh new file mode 100644 index 0000000000000000000000000000000000000000..4caa4e5d5ce641d7cc5c5b5dfebac8f3c71c6214 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_447.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer447/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer447/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_451.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_451.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..3dd02e50e4139d4aa468fb9d5bcb326bafc79ba1 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_451.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer451/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer451/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_455.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_455.sh new file mode 100644 index 0000000000000000000000000000000000000000..fac299a245fe8fe41a0e245e7cec69ef76006032 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_455.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer455/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer455/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_492.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_492.sh new file mode 100644 index 0000000000000000000000000000000000000000..35d05179eb8079d9fe3bd91f6982a381110cc6ed --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_492.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer492/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer492/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_50.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_50.sh new file mode 100644 index 0000000000000000000000000000000000000000..b05bf8087ba95b762b877c0ea74fc491d8ac74e8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_50.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer50/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer50/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_516.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_516.sh new file mode 100644 index 0000000000000000000000000000000000000000..dea88f2d948443b0dea0328b295cfe5236a5ecd3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_516.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer516/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer516/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_536.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_536.sh new file mode 100644 index 0000000000000000000000000000000000000000..b5272eafe7042cdf636361f0da4c5cd09dc039ea --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_536.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer536/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer536/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_54.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_54.sh new file mode 100644 index 0000000000000000000000000000000000000000..97312b65e5bf5059cfbaa77ec93d4218f421c151 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_54.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer54/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer54/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_546.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_546.sh new file mode 100644 index 0000000000000000000000000000000000000000..f88163e650cd10ff19265648a646d9f2d2327328 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_546.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer546/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer546/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_550.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_550.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..1744e4ff389cad564631bc0cd724592ac3ca1706 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_550.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer550/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer550/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_552.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_552.sh new file mode 100644 index 0000000000000000000000000000000000000000..94a93bf6a8a9332a40104d7537fab17c904638a1 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_552.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer552/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer552/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_557.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_557.sh new file mode 100644 index 0000000000000000000000000000000000000000..461684254cc6ddbb3a57052ef6e6b515d350f029 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_557.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer557/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer557/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_558.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_558.sh new file mode 100644 index 0000000000000000000000000000000000000000..c2aeaf88c0b5e4c88e7ab80f0cfe70ac21a9faac --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_558.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer558/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer558/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_587.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_587.sh new file mode 100644 index 0000000000000000000000000000000000000000..a2d342c1abf1892bdd1ef4e900edc4e28239c153 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_587.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer587/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer587/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_605.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_605.sh new file mode 100644 index 0000000000000000000000000000000000000000..5b069fe0134c03102bd1d494f0b6acb35647f1e3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_605.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer605/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer605/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_612.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_612.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9fa87f6e2182ff9547e1bc15a0f5aac7ff122897
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_612.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer612/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer612/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_621.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_621.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b8fce920e0d62cb16fd7bf47b3367b8ae17806b2
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_621.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer621/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer621/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_622.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_622.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c9311b956473c9dd543a452bec090d450ff68e9d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_622.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer622/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer622/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_623.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_623.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8d71bf3ed6817fad04a8a640568a731decd10be6
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_623.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer623/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer623/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_624.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_624.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7c54f269a333f7d0c6f7c70a19f04590905c87b3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_624.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer624/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer624/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_639.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_639.sh
new file mode 100644
index 0000000000000000000000000000000000000000..10194098082e23360b3c883cb7b7e6d6ef9eb4f6
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_639.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer639/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer639/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_641.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_641.sh
new file mode 100644
index 0000000000000000000000000000000000000000..adc9f9fd7a800627cccc67e8ed70e347123605f5
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_641.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitab_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer641/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer641/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_648.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_648.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ec6734599ad835f618196b81d55ec3dde5048e6f
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_648.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer648/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer648/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_651.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_651.sh
new file mode 100644
index 0000000000000000000000000000000000000000..47ab100e18a7c296e9d5e401e5da5bcd7b370ac0
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_651.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer651/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer651/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_655.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_655.sh
new file mode 100644
index 0000000000000000000000000000000000000000..336aaeb69ca6516988aa17f76dbfff0a0c29001d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_655.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitae_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer655/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer655/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_671.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_671.sh
new file mode 100644
index 0000000000000000000000000000000000000000..52138494aea42f21add0910b88c7f9efd2198470
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_671.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer671/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer671/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_683.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_683.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7e9079108c7392fd9fdabe4c611b16ec09f9504d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_683.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer683/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer683/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_702.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_702.sh
new file mode 100644
index 0000000000000000000000000000000000000000..119a0f5f9718ba22d5609f6773e8c1c33ab11fe8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_702.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer702/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer702/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_704.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_704.sh
new file mode 100644
index 0000000000000000000000000000000000000000..77b813435b0734c46e94cbf391765ee7661404ff
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_704.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer704/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer704/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_73.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_73.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7e3440e9ebd7dcaff0549d99a630ece755a37649
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_73.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer73/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer73/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_730.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_730.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a3b307f5ec401803d4b7f2dcb9ec83f13acd7395
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_730.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer730/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer730/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_739.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_739.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3097c81779c79ba843ee2afc862dd6db4ae4c9c3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_739.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer739/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitae_splitac \
+ --output-prefix $FINAL_DIR/tokenizer739/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_741.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_741.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2d94fb793c2ecd1a5a757b5b3204a3cb6449c4e7
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_741.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitae_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer741/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer741/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_766.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_766.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3492dd9495e32c29cf99a562fe09ca12cad4ea40
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_766.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer766/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitad \
+ --output-prefix $FINAL_DIR/tokenizer766/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_769.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_769.sh
new file mode 100644
index 0000000000000000000000000000000000000000..46334cf88dc98236557df76b3d9382efccd7f306
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_769.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer769/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaa_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer769/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_780.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_780.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2f5c666d6987b72392b69099da9c0ff8b5678ba4
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_780.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer780/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer780/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_785.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_785.sh
new file mode 100644
index 0000000000000000000000000000000000000000..eb5f17c49586d5986d9a18153677ecfe49e1a4b6
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_785.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer785/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer785/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_788.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_788.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3c8ec4c5bbcb781b961a4df60885e45d453492b1
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_788.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer788/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitae \
+ --output-prefix $FINAL_DIR/tokenizer788/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_794.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_794.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f9a7cc537966f1c2f59a5cc57ef867b41cdc7ce9
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_794.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer794/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer794/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_800.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_800.sh
new file mode 100644
index 0000000000000000000000000000000000000000..09306528b1ec02d0d0e096798d19735fc83dee44
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_800.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer800/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer800/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_810.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_810.sh
new file mode 100644
index 0000000000000000000000000000000000000000..560f1275aedaac2f59c251e9b4510fb3787cc40f
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_810.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer810/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer810/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_813.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_813.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0417262cac5fcac5a56ef6d911d300f836ec9322
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_813.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer813/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitad \
+ --output-prefix $FINAL_DIR/tokenizer813/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_825.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_825.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4218d8eb3fa56180b2f0df8aeb819da3176065d9
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_825.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer825/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer825/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_95.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_95.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8cacb060ac8b25af1ce460184fb5389f2b94208c
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_95.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer95/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer95/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
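Note: every tokenizer_*.sh added by this diff follows the same pattern: rewrite the raw_content key to text in one split file, then run Megatron-DeepSpeed's tools/preprocess_data.py on that split with the shared SentencePiece model, writing the binarized output to a per-job directory. The script below is only an illustrative sketch of how that pattern could be consolidated into a single parameterized script; the name tokenize_split.sh, its two arguments, and the set -euo pipefail and quoting choices are additions for this example and are not part of the diff.

#!/bin/bash
# tokenize_split.sh - hypothetical consolidation of the per-split scripts above.
# Usage: bash tokenize_split.sh <job-number> <split-name>
# e.g.:  bash tokenize_split.sh 550 finalaz_splitae_splitae
set -euo pipefail

JOB_NUM=$1
SPLIT_NAME=$2

SPLIT_FILE=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split/$SPLIT_NAME
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k

# Rename the "raw_content" field to "text", the key the preprocessing step reads,
# exactly as the generated scripts do with sed.
sed -i -e "s/raw_content/text/g" "$SPLIT_FILE"

mkdir -p "$FINAL_DIR/tokenizer$JOB_NUM"
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed

# Same flags as the generated scripts: SentencePiece tokenizer, mmap dataset, EOD appended.
python3 tools/preprocess_data.py \
    --input "$SPLIT_FILE" \
    --output-prefix "$FINAL_DIR/tokenizer$JOB_NUM/" \
    --tokenizer-model "$TOKENIZER_PATH/all.model" \
    --vocab-file "$TOKENIZER_PATH/all.vocab" \
    --tokenizer-type SentencePieceTokenizer \
    --dataset-impl mmap \
    --append-eod --workers 8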