diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_102.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_102.sh new file mode 100644 index 0000000000000000000000000000000000000000..71958face8355c30a68639ba3a930bc164187a38 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_102.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer102/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer102/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_110.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_110.sh new file mode 100644 index 0000000000000000000000000000000000000000..831dbe48291939284d90cf7da6b4929caf3b7a32 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_110.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer110/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer110/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_115.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_115.sh new file mode 100644 index 0000000000000000000000000000000000000000..01676454afa1eecf597979db5e04d9d2ca229cc6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_115.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer115/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside 
deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitab \ + --output-prefix $FINAL_DIR/tokenizer115/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_13.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_13.sh new file mode 100644 index 0000000000000000000000000000000000000000..e94ba17c5c03f563162e56c552ea525c965e1512 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_13.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer13/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer13/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_138.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_138.sh new file mode 100644 index 0000000000000000000000000000000000000000..8aad118c604003aba6b77fc717182e7880f074a8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_138.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer138/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer138/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_171.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_171.sh new file mode 100644 index 0000000000000000000000000000000000000000..20e5895a73b7d38ef778d91bc1c2dcc2e3a06959 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_171.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitab +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer171/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer171/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_182.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_182.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f524a4ccf70749ac3583875fdd07e6fd42053fe --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_182.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer182/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitae_splitac \ + --output-prefix $FINAL_DIR/tokenizer182/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_185.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_185.sh new file mode 100644 index 0000000000000000000000000000000000000000..3b6b6fb0904cfc8b021c0bf1eef952767853c332 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_185.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer185/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaf \ + --output-prefix $FINAL_DIR/tokenizer185/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_186.sh 
b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_186.sh new file mode 100644 index 0000000000000000000000000000000000000000..c298ddd1823c665a874f45f8b25b3d677d545767 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_186.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer186/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer186/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_188.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_188.sh new file mode 100644 index 0000000000000000000000000000000000000000..3d61952d6bb92f044dc2a3e1f2640a08472020bf --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_188.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer188/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer188/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_198.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_198.sh new file mode 100644 index 0000000000000000000000000000000000000000..eac2ca16b87533a69dcb131dcbad9fc1fa5ae2dd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_198.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer198/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + 
--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer198/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_2.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_2.sh new file mode 100644 index 0000000000000000000000000000000000000000..802c3a2d7af6c73bcbfb44ce49e577601ceed2e9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_2.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer2/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer2/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_219.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_219.sh new file mode 100644 index 0000000000000000000000000000000000000000..f9fec085616d5e643970565bc02542fa79198c28 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_219.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer219/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer219/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_235.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_235.sh new file mode 100644 index 0000000000000000000000000000000000000000..43eeb3fec58beecb135bd855b25cef57658bb434 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_235.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer235/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitab \ + --output-prefix $FINAL_DIR/tokenizer235/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_236.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_236.sh new file mode 100644 index 0000000000000000000000000000000000000000..78033590afd6e141383d1ec2793551603568fe13 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_236.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer236/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer236/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_242.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_242.sh new file mode 100644 index 0000000000000000000000000000000000000000..4e9477aec9f2d2ae1dba4f785230e521545cd2a0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_242.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer242/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer242/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_244.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_244.sh new file 
mode 100644 index 0000000000000000000000000000000000000000..5aa500a3b437321bfff28a9e1fa207cbdc8f37f6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_244.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer244/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer244/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_253.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_253.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c41d4122d63ae8728b4ce759f7e5e6ace1fa2e8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_253.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer253/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer253/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_256.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_256.sh new file mode 100644 index 0000000000000000000000000000000000000000..2bdf6f94ca6473a29419d1d27a1c8af489504e58 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_256.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer256/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer256/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_261.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_261.sh new file mode 100644 index 0000000000000000000000000000000000000000..1b5d0acdfaa941e9b0ab3844a3e6b210ce67b690 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_261.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer261/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer261/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_285.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_285.sh new file mode 100644 index 0000000000000000000000000000000000000000..5d2e22b85908dd750586026a5ca6600ff50aed82 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_285.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer285/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer285/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_292.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_292.sh new file mode 100644 index 0000000000000000000000000000000000000000..673b97546a505e1e7b205df206b67f17650d8ec1 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_292.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer292/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer292/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_297.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_297.sh new file mode 100644 index 0000000000000000000000000000000000000000..a1b70ecf2959e892cdd33f581d067d6c42a2ec97 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_297.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer297/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer297/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_3.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_3.sh new file mode 100644 index 0000000000000000000000000000000000000000..d00162311e7c254a1924993c8dc35d1239bddd7d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_3.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer3/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer3/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_302.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_302.sh new file 
mode 100644 index 0000000000000000000000000000000000000000..318fc6c8967063ec4557b6165c5d3b10b45359be --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_302.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer302/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer302/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_309.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_309.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc3661df776cf9c29a759ae5953cceab29d4c0d6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_309.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer309/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaf \ + --output-prefix $FINAL_DIR/tokenizer309/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_31.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_31.sh new file mode 100644 index 0000000000000000000000000000000000000000..a3a4ac97ae14b15ef6e3247ae22dc510652538f3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_31.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer31/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer31/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_318.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_318.sh new file mode 100644 index 0000000000000000000000000000000000000000..f0d3b4bdccce48d74769c6ea8dd1cd88b678ad25 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_318.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer318/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer318/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_329.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_329.sh new file mode 100644 index 0000000000000000000000000000000000000000..682415e791403aa374105af6d90c4f9b1e09d4b7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_329.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer329/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer329/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_340.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_340.sh new file mode 100644 index 0000000000000000000000000000000000000000..ae726bf0601eb8fd796ea0c9d9af2ff7e531bfc0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_340.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer340/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer340/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_345.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_345.sh new file mode 100644 index 0000000000000000000000000000000000000000..5d2522db289544bec289a819996a9334ff3ba673 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_345.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer345/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer345/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_350.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_350.sh new file mode 100644 index 0000000000000000000000000000000000000000..ddfdb25179be638098e2190a9e01274bcaef6b0a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_350.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer350/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer350/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_367.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_367.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..7aa636d097e77a80b36985d4e32326261a64abc9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_367.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer367/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer367/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_368.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_368.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc574dbf0401af9ac2cc018d9d26cf8e1d05cb1a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_368.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer368/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer368/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_374.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_374.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d82f3495b4cea5937f9c03a03649ea73c8e513f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_374.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer374/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitab \ + --output-prefix $FINAL_DIR/tokenizer374/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_395.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_395.sh new file mode 100644 index 0000000000000000000000000000000000000000..5fbc62317516ee40896d578f5e18abed410796a7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_395.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer395/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer395/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_397.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_397.sh new file mode 100644 index 0000000000000000000000000000000000000000..030e1f84409106447af085135afe457bd8926359 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_397.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer397/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer397/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_400.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_400.sh new file mode 100644 index 0000000000000000000000000000000000000000..0eab0edfa6d188d4342980dbb58df628304cea0b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_400.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer400/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer400/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_401.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_401.sh new file mode 100644 index 0000000000000000000000000000000000000000..e33d563a059dc54503c5445fb9e69e110b9653de --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_401.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer401/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer401/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_404.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_404.sh new file mode 100644 index 0000000000000000000000000000000000000000..62ead5eac2eb0bb1990db534f92ee4c8cd06c0c0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_404.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer404/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer404/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_423.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_423.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..8fd9783eae150b9887f192e0f6ef666768e7a552 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_423.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer423/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer423/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_43.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_43.sh new file mode 100644 index 0000000000000000000000000000000000000000..d8a7f2bd2ab6841230a72209cdd986e493dda5cf --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_43.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer43/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer43/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_444.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_444.sh new file mode 100644 index 0000000000000000000000000000000000000000..7c4b3c55d6253ee7bda67a9b5d86168f18136e4d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_444.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer444/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer444/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_465.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_465.sh new file mode 100644 index 0000000000000000000000000000000000000000..e48670257b7d230f3a91cbee9f9565d699aa4fe4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_465.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer465/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer465/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_518.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_518.sh new file mode 100644 index 0000000000000000000000000000000000000000..f5a202c1e6e11a73d1f6945216a1f81f8c62b00e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_518.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer518/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer518/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_579.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_579.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad3e53d5f51ee3670701e5e353eef7331124c17e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_579.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer579/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer579/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_600.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_600.sh new file mode 100644 index 0000000000000000000000000000000000000000..7558c66b17fc72801dac8cbc096e4f9d484c0b29 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_600.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer600/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer600/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_604.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_604.sh new file mode 100644 index 0000000000000000000000000000000000000000..8778e53fcaf7cea7be28640a14f4aab4823c5fcb --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_604.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer604/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer604/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_608.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_608.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..158bacb22b521405a581bfe2f8bd82d9e164ecec --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_608.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer608/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer608/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_610.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_610.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c6427a21598562917bda1eec2d98b1634fcafd7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_610.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer610/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer610/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_659.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_659.sh new file mode 100644 index 0000000000000000000000000000000000000000..924011bd8229872a601c2c7cae0e631b55ac77b3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_659.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer659/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer659/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_662.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_662.sh new file mode 100644 index 0000000000000000000000000000000000000000..1e1072a881fe477f5e3efe6765e5faea84f94ec8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_662.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer662/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer662/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_667.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_667.sh new file mode 100644 index 0000000000000000000000000000000000000000..7665e7a15fa760e67a677612aa1002bcc1a43246 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_667.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer667/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer667/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_68.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_68.sh new file mode 100644 index 0000000000000000000000000000000000000000..9556aff0dc7dee670262b52c95a1a47ebe822b22 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_68.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer68/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer68/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_685.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_685.sh new file mode 100644 index 0000000000000000000000000000000000000000..7579d59146938c6d0f51906d597a60811504d715 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_685.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer685/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitab \ + --output-prefix $FINAL_DIR/tokenizer685/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_689.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_689.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f9588a256230ae28917849f634a89edc1c50796 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_689.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer689/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer689/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_69.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_69.sh new file mode 100644 
index 0000000000000000000000000000000000000000..dbf6d2fcde6415db1824a1bfcc193a5a9f089329 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_69.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer69/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer69/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_705.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_705.sh new file mode 100644 index 0000000000000000000000000000000000000000..981943d81df788a7893689576d37b0c24fce573e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_705.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer705/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer705/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_709.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_709.sh new file mode 100644 index 0000000000000000000000000000000000000000..8ba5562042bb6db7d867c1bd6c97bab01248fab9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_709.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer709/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer709/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_719.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_719.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7f6bce6e551cc5ed39c6c01f20fef83bb45fbd2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_719.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer719/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer719/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_727.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_727.sh new file mode 100644 index 0000000000000000000000000000000000000000..e45c19c12ab686068f186f84f6ffc6ed111f0546 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_727.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer727/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaf \ + --output-prefix $FINAL_DIR/tokenizer727/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_731.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_731.sh new file mode 100644 index 0000000000000000000000000000000000000000..cc0fd50fcbf768ef1c4f6a31d2be1a78e6bfcc57 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_731.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M 
+#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer731/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer731/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_743.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_743.sh new file mode 100644 index 0000000000000000000000000000000000000000..3a7f04da738e82310dab89a1aba9f44687f63b9a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_743.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer743/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer743/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_763.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_763.sh new file mode 100644 index 0000000000000000000000000000000000000000..1c32d200c4b3acda031b6ff6658d6965109c5fb5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_763.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer763/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer763/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_77.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_77.sh new file mode 100644 index 
0000000000000000000000000000000000000000..774d361746d6de0e153a67f90f2ee720ed2225bd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_77.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer77/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer77/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_783.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_783.sh new file mode 100644 index 0000000000000000000000000000000000000000..e48329ea3e302e56ec12eee60d4e285270dd9304 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_783.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer783/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer783/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_784.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_784.sh new file mode 100644 index 0000000000000000000000000000000000000000..2dc85a8d6e48680075f5c7657212630647f4772c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_784.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer784/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer784/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_790.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_790.sh new file mode 100644 index 0000000000000000000000000000000000000000..dfa16ec5801f8b11bf2a8fffe11336c009d8e27a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_790.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer790/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer790/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_798.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_798.sh new file mode 100644 index 0000000000000000000000000000000000000000..5492d3c9dd6933edb4c80fe9bf2112d4df4cd20f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_798.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer798/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer798/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_8.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_8.sh new file mode 100644 index 0000000000000000000000000000000000000000..91b0fc13058d8a4c150934b709d959c72f67c147 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_8.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer8/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer8/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_803.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_803.sh new file mode 100644 index 0000000000000000000000000000000000000000..0606b2b50880384a31bc9b6f4ca690ea4d1f9bb3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_803.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer803/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer803/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_832.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_832.sh new file mode 100644 index 0000000000000000000000000000000000000000..f24599775fc4b5914360866693b101776d2ddb55 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_832.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer832/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitad \ + --output-prefix $FINAL_DIR/tokenizer832/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_84.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_84.sh new file mode 100644 
index 0000000000000000000000000000000000000000..794285a36e9326c0bcd5756722b2a10c85218fb4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_84.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer84/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer84/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_87.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_87.sh new file mode 100644 index 0000000000000000000000000000000000000000..05431bc4bb8324879f3107c55ba34817aef7b377 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_87.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer87/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer87/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_99.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_99.sh new file mode 100644 index 0000000000000000000000000000000000000000..e092e561e30775dad963d562a47155072d6cca28 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_99.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer99/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer99/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ +
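Note: every tokenizer_N.sh added in this patch follows the same template and differs only in the input split file name and the tokenizerN/ output sub-directory. As a minimal sketch (not part of the patch itself), the whole family could be driven by one parameterized script; the name tokenize_split.sh and its two positional arguments are hypothetical, while all paths and preprocess_data.py flags are taken directly from the scripts above.

#!/bin/bash
# tokenize_split.sh -- hypothetical consolidation of the per-split scripts in this patch.
# Usage: bash tokenize_split.sh <split_file_name> <run_id>
# Example: bash tokenize_split.sh finalae_splitad_splitad 102
set -euo pipefail

SPLIT_NAME="$1"   # e.g. finalae_splitad_splitad
RUN_ID="$2"       # e.g. 102 -> output goes to tokenizer102/

SPLIT_DIR=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer

# Rename the JSON field to the "text" key expected by preprocess_data.py,
# exactly as the per-split scripts do with sed.
sed -i -e "s/raw_content/text/g" "$SPLIT_DIR/$SPLIT_NAME"

mkdir -p "$FINAL_DIR/tokenizer$RUN_ID/"
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed

python3 tools/preprocess_data.py \
    --input "$SPLIT_DIR/$SPLIT_NAME" \
    --output-prefix "$FINAL_DIR/tokenizer$RUN_ID/" \
    --tokenizer-model "$TOKENIZER" \
    --vocab-file "$VOCAB_FILE" \
    --dataset-impl mmap \
    --tokenizer-type "$TOKENIZER_TYPE" \
    --append-eod \
    --workers 8

With this sketch, tokenizer_423.sh would correspond to "bash tokenize_split.sh finalau_splitab_splitac 423", and so on for the other splits. Assuming the standard Megatron-DeepSpeed behaviour, each run typically writes an indexed dataset pair (*.bin and *.idx) under the given --output-prefix, which downstream training jobs consume via the mmap dataset implementation.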