diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_10.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_10.sh new file mode 100644 index 0000000000000000000000000000000000000000..3900702d95286c084fe3baa8f0397db7cc25a14f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_10.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer10/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer10/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_100.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_100.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bee3a1295f73d7268c824b5e7d5d0c5d546ed99 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_100.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer100/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer100/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_11.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_11.sh new file mode 100644 index 0000000000000000000000000000000000000000..7ae2c6ddb7018ae789b32484979312d4d9882f28 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_11.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer11/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside 
deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer11/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_152.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_152.sh new file mode 100644 index 0000000000000000000000000000000000000000..7223d3cdac5911fbfe1f68d79c8964ab672372d9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_152.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer152/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer152/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_17.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_17.sh new file mode 100644 index 0000000000000000000000000000000000000000..ffed945ec725299bba424a14d81a4820abad8dc4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_17.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer17/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaf \ + --output-prefix $FINAL_DIR/tokenizer17/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_187.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_187.sh new file mode 100644 index 0000000000000000000000000000000000000000..f0e41edf80147e21001799543d80f4e87037f768 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_187.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitab +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer187/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer187/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_194.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_194.sh new file mode 100644 index 0000000000000000000000000000000000000000..436b70457d75c78da01927988f9768bc91230a2f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_194.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer194/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer194/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_20.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_20.sh new file mode 100644 index 0000000000000000000000000000000000000000..f6f764ccda2eb3bdc8b146656251ce16962761f3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_20.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer20/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer20/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_207.sh 
b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_207.sh new file mode 100644 index 0000000000000000000000000000000000000000..e80945611ea2a2f7ff1411536765250ba01a8e1a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_207.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer207/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaf \ + --output-prefix $FINAL_DIR/tokenizer207/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_212.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_212.sh new file mode 100644 index 0000000000000000000000000000000000000000..f2e05084a2dab8d0fd3a64ee7b05117b4880bb9b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_212.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer212/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer212/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_231.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_231.sh new file mode 100644 index 0000000000000000000000000000000000000000..fd8835e70bde96ade3a3ffbc8561dfa174c71774 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_231.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer231/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer231/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_237.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_237.sh new file mode 100644 index 0000000000000000000000000000000000000000..580e5300748549ac206769b75382146b62f87957 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_237.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer237/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer237/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_240.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_240.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7f43b28bdd7f625a6ae7846f0749ecb389e57b3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_240.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer240/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer240/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_26.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_26.sh new file mode 100644 index 0000000000000000000000000000000000000000..41e6c6d8282656f1bc4158be7d97ca086e72b1d8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_26.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer26/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer26/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_265.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_265.sh new file mode 100644 index 0000000000000000000000000000000000000000..6b461270a3c169d7e5bc97416edc898181b38e40 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_265.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer265/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer265/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_281.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_281.sh new file mode 100644 index 0000000000000000000000000000000000000000..e606edd6b38d40f0bca7dc2f6fe3aee9e9566564 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_281.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer281/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer281/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_291.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_291.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..c49dc7516f7216c366b88c120d55c618612d3326 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_291.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer291/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer291/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_298.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_298.sh new file mode 100644 index 0000000000000000000000000000000000000000..b154e245fe1c93e88ca21e5c979214e83e687446 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_298.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer298/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitac \ + --output-prefix $FINAL_DIR/tokenizer298/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_30.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_30.sh new file mode 100644 index 0000000000000000000000000000000000000000..a33b21871e0c0c3f7559d1d187d12a2b66c0a6a2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_30.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer30/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer30/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_311.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_311.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac94c31fe781906e291cf4dd8335f149237f617b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_311.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer311/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer311/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_327.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_327.sh new file mode 100644 index 0000000000000000000000000000000000000000..b97603d95cec7e2fb080428d0ef42ade4ea0e072 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_327.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer327/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer327/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_333.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_333.sh new file mode 100644 index 0000000000000000000000000000000000000000..552fb44cbee4b04e3af0fa63c8576e5d2beeeaca --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_333.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer333/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer333/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_335.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_335.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c9f74ecc4a8b3454f8ac379d4808aee3ef390f8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_335.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer335/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaf \ + --output-prefix $FINAL_DIR/tokenizer335/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_339.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_339.sh new file mode 100644 index 0000000000000000000000000000000000000000..9cf048c099b6d3086a202950b8c1898905dfc532 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_339.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer339/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer339/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_342.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_342.sh new file mode 
100644 index 0000000000000000000000000000000000000000..f2ab53d51333460609c3eaee04949b3d79da484d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_342.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer342/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer342/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_346.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_346.sh new file mode 100644 index 0000000000000000000000000000000000000000..ffd9920b10525ce0d6771839910e8e869f643cb3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_346.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer346/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer346/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_357.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_357.sh new file mode 100644 index 0000000000000000000000000000000000000000..c74d2f2efc1259344f07ade1a6806ef8828be6db --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_357.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer357/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaf \ + --output-prefix $FINAL_DIR/tokenizer357/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_360.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_360.sh new file mode 100644 index 0000000000000000000000000000000000000000..eaf3181977fb36351cc790c9265c3c1f6b90e01f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_360.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer360/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer360/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_369.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_369.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f8b56d8c9b800188470fcafa4c3638cd7b11ff7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_369.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer369/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer369/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_378.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_378.sh new file mode 100644 index 0000000000000000000000000000000000000000..e5105787d5535500d32ea424bc671edb096f19f3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_378.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M 
+#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer378/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitae \ + --output-prefix $FINAL_DIR/tokenizer378/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_396.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_396.sh new file mode 100644 index 0000000000000000000000000000000000000000..f382627a66865cb92f3291b302558ec37f6440ea --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_396.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer396/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer396/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_417.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_417.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4f37f09c6428120a5f21483d54f27da73f8387b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_417.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer417/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer417/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_419.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_419.sh new file mode 100644 index 
0000000000000000000000000000000000000000..7a7e6ae625969971a0138594714364695e7237e6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_419.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer419/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitaf \ + --output-prefix $FINAL_DIR/tokenizer419/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_425.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_425.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a25a12a4272a81213ead7a377b735243e6c7e71 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_425.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer425/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer425/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_438.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_438.sh new file mode 100644 index 0000000000000000000000000000000000000000..aeb0518a6cf4cc31482a5edd8e478a63b206a016 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_438.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer438/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitae_splitac \ + 
--output-prefix $FINAL_DIR/tokenizer438/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_446.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_446.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ff2708478a5d86ba0116b9e4f8ad93a4ad89e8c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_446.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer446/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer446/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_45.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_45.sh new file mode 100644 index 0000000000000000000000000000000000000000..d1c8c48c7c4c0360d424985cae37b7c7abcd8a91 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_45.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer45/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer45/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_46.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_46.sh new file mode 100644 index 0000000000000000000000000000000000000000..b54bc04f8d8e43bf135a266798e53ba2e289aeb5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_46.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer 
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer46/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer46/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_473.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_473.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0d681f18ef5ca9c264a5dcac17fd3426eb8479b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_473.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer473/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer473/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_475.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_475.sh new file mode 100644 index 0000000000000000000000000000000000000000..bde7209cbf7c9755d1b4ddd63f7b657607c2cd3b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_475.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer475/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer475/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_477.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_477.sh new file mode 100644 index 
0000000000000000000000000000000000000000..cdb05304fc3a626e1e3ec4f0cfde5241bf6344ac --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_477.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer477/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer477/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_5.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_5.sh new file mode 100644 index 0000000000000000000000000000000000000000..66990e2d9abd48f1b3b4cbfcabc5aaed09ae8fca --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_5.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer5/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer5/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_519.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_519.sh new file mode 100644 index 0000000000000000000000000000000000000000..8551c222fa01f146271efd10e90462e72b227601 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_519.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer519/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitad_splitaa 
\ + --output-prefix $FINAL_DIR/tokenizer519/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_541.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_541.sh new file mode 100644 index 0000000000000000000000000000000000000000..c59a2ac1c950659f3ad2f8920135a2642547094d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_541.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer541/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer541/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_545.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_545.sh new file mode 100644 index 0000000000000000000000000000000000000000..81f91a4f3ad56e3853838565934b614756e606c7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_545.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer545/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer545/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_570.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_570.sh new file mode 100644 index 0000000000000000000000000000000000000000..bb3a36754958917a120beb74e228ac104d5073ea --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_570.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer 
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer570/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer570/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_571.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_571.sh new file mode 100644 index 0000000000000000000000000000000000000000..b84492bae6e544068233cdb80c9bda9b8453c039 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_571.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer571/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer571/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_59.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_59.sh new file mode 100644 index 0000000000000000000000000000000000000000..d960cb35940107792cfebf3092cfcfe2cfa3eb15 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_59.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer59/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer59/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_595.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_595.sh new file mode 100644 index 
0000000000000000000000000000000000000000..9d8e5bad4cfd94badb611d128954ea7369e9ae3e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_595.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer595/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitaf \ + --output-prefix $FINAL_DIR/tokenizer595/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_6.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_6.sh new file mode 100644 index 0000000000000000000000000000000000000000..839b5d2eb858519953ce01309d2e0ad65971876a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_6.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer6/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer6/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_633.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_633.sh new file mode 100644 index 0000000000000000000000000000000000000000..d62ee31c43baa77987b28b534707bbb1bb207815 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_633.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer633/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitad \ + --output-prefix 
$FINAL_DIR/tokenizer633/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_690.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_690.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a7e113457e1f54102aa4a86c20b13db2a81f966 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_690.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer690/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer690/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_691.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_691.sh new file mode 100644 index 0000000000000000000000000000000000000000..b653167b8c4f4283886f086a6da78b6a6d8c7a5c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_691.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer691/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer691/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_692.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_692.sh new file mode 100644 index 0000000000000000000000000000000000000000..231f0bc758cc74c733ef7d0d9cc1ad1215e120c7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_692.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer 
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer692/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer692/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_695.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_695.sh new file mode 100644 index 0000000000000000000000000000000000000000..49db58e28cb78471ca2431f1918b82b0a3b94bfd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_695.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer695/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer695/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_712.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_712.sh new file mode 100644 index 0000000000000000000000000000000000000000..c657729b97634ed33b9f8296a531bb60452af21b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_712.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer712/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer712/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_721.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_721.sh new file mode 100644 index 
0000000000000000000000000000000000000000..e5e7391135a2b8caa6b366941435131f91219a86 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_721.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer721/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer721/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_722.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_722.sh new file mode 100644 index 0000000000000000000000000000000000000000..547fbe8969ca06fdb9f34d891621b712a92f0571 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_722.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer722/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer722/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_734.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_734.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d3b111cb08aee7a82e8bcf5736fa8775d21c790 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_734.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer734/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitac \ + 
--output-prefix $FINAL_DIR/tokenizer734/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_755.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_755.sh new file mode 100644 index 0000000000000000000000000000000000000000..d2aab1472f0877a3b8a6ed9ebc3d45dea6fbef1c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_755.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer755/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer755/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_757.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_757.sh new file mode 100644 index 0000000000000000000000000000000000000000..0fe32fa4e42f4514d822751f0a7da4feb696b633 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_757.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer757/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer757/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_759.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_759.sh new file mode 100644 index 0000000000000000000000000000000000000000..2b6fd95d533647448ac20c7145f1675349666632 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_759.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer 
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer759/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer759/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_765.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_765.sh new file mode 100644 index 0000000000000000000000000000000000000000..e8dfe61910d82fc94138ba8a73e83f3f23532814 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_765.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer765/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitac \ + --output-prefix $FINAL_DIR/tokenizer765/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_773.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_773.sh new file mode 100644 index 0000000000000000000000000000000000000000..eea876ee5198140591172b6c1304d6dc0ddf16fd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_773.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer773/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer773/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_782.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_782.sh new file mode 100644 index 
0000000000000000000000000000000000000000..b397dfaed75b1f2bb5c79052de6b5fd03471fd82 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_782.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer782/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer782/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_786.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_786.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bd50f64574a5f2a69c59b696da30c067274a5ed --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_786.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer786/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer786/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_80.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_80.sh new file mode 100644 index 0000000000000000000000000000000000000000..7cbb3d1b43a3ddebffc36619f70e5c0b959569cd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_80.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitae_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer80/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitae_splitac \ + --output-prefix $FINAL_DIR/tokenizer80/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_806.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_806.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc490893c5dde4f8447a27034f03f3a1dd79a646 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_806.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer806/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer806/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_809.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_809.sh new file mode 100644 index 0000000000000000000000000000000000000000..02a1f9cc072f9b874ac63a51707a56a07aa963c5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_809.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer809/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer809/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_811.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_811.sh new file mode 100644 index 0000000000000000000000000000000000000000..dd4eb97bb9a166d1d7c09d370b9761a972bb7a3d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_811.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer811/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer811/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_820.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_820.sh new file mode 100644 index 0000000000000000000000000000000000000000..9eb6f0fcc4909120b0b1c929c8cfb114b9c528c5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_820.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer820/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer820/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_823.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_823.sh new file mode 100644 index 0000000000000000000000000000000000000000..ba2eaa794a1590a17dcef38934967ec155f98b3f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_823.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer823/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer823/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_824.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_824.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..1213342288982c35ae5bd9f23f3be5f35a787095 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_824.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer824/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer824/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_831.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_831.sh new file mode 100644 index 0000000000000000000000000000000000000000..6b527bd70b5dbc60c48fadffd351e91c2650a048 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_831.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer831/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer831/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_86.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_86.sh new file mode 100644 index 0000000000000000000000000000000000000000..a3711da36749aa7c36bf74c96d0f1e54db43dc1f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_86.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer86/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer86/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ +
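Each of the scripts added in this patch follows the same template and differs only in the input split file under split/ and the numbered tokenizer output directory. As a non-authoritative sketch (the wrapper name tokenize_split.sh and its two positional parameters are assumptions introduced here, not part of the patch), the repeated body could be factored into one parameterized script:

#!/bin/bash
# Sketch only: parameterized version of the repeated tokenizer_NNN.sh template above.
# Hypothetical usage: ./tokenize_split.sh <index> <split_file_name>
# e.g. ./tokenize_split.sh 477 finalaw_splitab_splitae
set -m

IDX=$1    # numeric suffix used for the per-split output directory (tokenizer$IDX/)
SPLIT=$2  # split file name under the split/ directory

INPUT=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split/$SPLIT
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer

# Rename the "raw_content" field to "text" in place, as each per-split script does.
sed -i -e "s/raw_content/text/g" "$INPUT"

mkdir -p "$FINAL_DIR/tokenizer$IDX/"
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed

python3 tools/preprocess_data.py \
    --input "$INPUT" \
    --output-prefix "$FINAL_DIR/tokenizer$IDX/" \
    --tokenizer-model "$TOKENIZER" \
    --vocab-file "$VOCAB_FILE" \
    --dataset-impl mmap \
    --tokenizer-type "$TOKENIZER_TYPE" \
    --append-eod --workers 8

Invoked as, for example, ./tokenize_split.sh 86 finalae_splitaa_splitac, this would reproduce the behaviour of tokenizer_86.sh above; the per-file scripts in the patch remain the authoritative versions.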