diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_1.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_1.sh new file mode 100644 index 0000000000000000000000000000000000000000..b81d41d14ab5119c852481557432b6626cf7b4c9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_1.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer1/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer1/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_116.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_116.sh new file mode 100644 index 0000000000000000000000000000000000000000..f14ff62ddbcfac13ccc099af5935d8d8bcd4af14 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_116.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer116/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer116/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_133.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_133.sh new file mode 100644 index 0000000000000000000000000000000000000000..8f69adcecd624014b59e731fbcb1222096c0df62 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_133.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer133/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside 
deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer133/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_144.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_144.sh new file mode 100644 index 0000000000000000000000000000000000000000..eba585393bfcf896a90f919a176644a109f264ad --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_144.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer144/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer144/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_155.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_155.sh new file mode 100644 index 0000000000000000000000000000000000000000..c85582b3d5c2113d21d4224e8d7dbc8262d92b44 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_155.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer155/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer155/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_190.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_190.sh new file mode 100644 index 0000000000000000000000000000000000000000..fe95b3a8a16d26798dbcdaa38fac3eaade1c841b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_190.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitae +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer190/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer190/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_202.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_202.sh new file mode 100644 index 0000000000000000000000000000000000000000..e43af37817a69f112ead6c8bef5eb2c37f3d8955 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_202.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer202/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer202/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_203.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_203.sh new file mode 100644 index 0000000000000000000000000000000000000000..a90f5d62bb471e366a966b6683358870320e5835 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_203.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer203/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer203/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_211.sh 
b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_211.sh new file mode 100644 index 0000000000000000000000000000000000000000..5bb1367143c57d9c887b1ac7c218d4593322106f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_211.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer211/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer211/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_216.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_216.sh new file mode 100644 index 0000000000000000000000000000000000000000..ae4f8c53b68d5b5f395eed24c36c4f67940bd1a7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_216.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer216/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer216/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_224.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_224.sh new file mode 100644 index 0000000000000000000000000000000000000000..cbcda210f80d9c238c8633d5b34fced7f10e24b0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_224.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer224/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + 
--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer224/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_239.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_239.sh new file mode 100644 index 0000000000000000000000000000000000000000..e8c23d82764018f3e3a0614e8b28b710b85cb508 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_239.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer239/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer239/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_267.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_267.sh new file mode 100644 index 0000000000000000000000000000000000000000..ec21f7d07e64133c568a8585bfc79fd3cb9cb9bb --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_267.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer267/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer267/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_269.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_269.sh new file mode 100644 index 0000000000000000000000000000000000000000..953926153575d5bff7fbf6f6946e8931be80ba46 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_269.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer269/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitaf \ + --output-prefix $FINAL_DIR/tokenizer269/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_28.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_28.sh new file mode 100644 index 0000000000000000000000000000000000000000..1d747cf3c088c231dcade1d2b18ffa8b1c109797 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_28.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer28/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer28/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_336.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_336.sh new file mode 100644 index 0000000000000000000000000000000000000000..1521fb6b3447afb77c34e1ed72476d7ebefa083e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_336.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer336/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer336/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_349.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_349.sh new file mode 
100644 index 0000000000000000000000000000000000000000..6ae4d5b3711fb3a17e4dcb56c81551ff99888fbf --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_349.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer349/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer349/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_362.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_362.sh new file mode 100644 index 0000000000000000000000000000000000000000..4c5606b20f86e42cce567cbb8d535801b4dc31a0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_362.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer362/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer362/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_370.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_370.sh new file mode 100644 index 0000000000000000000000000000000000000000..81fa763ddab9919de17622f978ac71738215f645 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_370.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer370/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer370/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_371.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_371.sh new file mode 100644 index 0000000000000000000000000000000000000000..cbcee32bf06c526ca9bb3f832f560bc8a5e66af8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_371.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer371/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer371/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_373.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_373.sh new file mode 100644 index 0000000000000000000000000000000000000000..d133c65b44f077ca2f27151fdd4ea6fa669926c4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_373.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer373/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer373/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_377.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_377.sh new file mode 100644 index 0000000000000000000000000000000000000000..3fda5d156c791b17aedc6c95147df724f3ed2bd2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_377.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer377/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer377/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_383.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_383.sh new file mode 100644 index 0000000000000000000000000000000000000000..1cc797f719024b3203e2ab2b2ba849bdb73cf5ce --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_383.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer383/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitaa_splitad \ + --output-prefix $FINAL_DIR/tokenizer383/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_386.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_386.sh new file mode 100644 index 0000000000000000000000000000000000000000..cfdfc4fb947eb1b73ff00267745957fbd755c765 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_386.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer386/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer386/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_392.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_392.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..24f80d8e230ef62133ddb71a4f71938bfcef55f9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_392.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer392/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer392/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_4.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_4.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac0e3a5969292d29cb4db4d285bccad1a2dc70e5 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_4.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer4/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer4/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_414.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_414.sh new file mode 100644 index 0000000000000000000000000000000000000000..8442ebccc19e0a72258569d40b28f592849ebf15 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_414.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer414/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalat_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer414/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_420.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_420.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad6036955158a2a5ce2487f297115efd436794ec --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_420.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer420/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitaa \ + --output-prefix $FINAL_DIR/tokenizer420/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_448.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_448.sh new file mode 100644 index 0000000000000000000000000000000000000000..f9278a323e56e73c8133f170941313143ccb817f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_448.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer448/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer448/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_454.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_454.sh new file mode 100644 index 0000000000000000000000000000000000000000..4905a50e8093e4df60e136e1fec52c40b5e65cf6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_454.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M 
+#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer454/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitac \ + --output-prefix $FINAL_DIR/tokenizer454/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_456.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_456.sh new file mode 100644 index 0000000000000000000000000000000000000000..728da1e254dae643f4a4171bfa9dce78798b0e91 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_456.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer456/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer456/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_48.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_48.sh new file mode 100644 index 0000000000000000000000000000000000000000..681f398244670171cecd8e3455e6a6030ed5cc84 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_48.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer48/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer48/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_483.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_483.sh new file mode 100644 index 
0000000000000000000000000000000000000000..0f13ba410d47f24928c2e598975513b02ff8a40a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_483.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer483/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer483/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_502.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_502.sh new file mode 100644 index 0000000000000000000000000000000000000000..d16320f6f2c88c14cf9cddb11ff92b04b7af9867 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_502.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer502/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer502/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_503.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_503.sh new file mode 100644 index 0000000000000000000000000000000000000000..dedeb82f01a46c4a8f329b37c87e0fb5ce809890 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_503.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer503/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer503/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_508.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_508.sh new file mode 100644 index 0000000000000000000000000000000000000000..3d7c43a33e69d84c65ed6fc4f33de17ef9b7e0bd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_508.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer508/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitaa \ + --output-prefix $FINAL_DIR/tokenizer508/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_512.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_512.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1e2040cf5eae6ccee2e0c15e1cbd42cdf9a37a8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_512.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer512/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer512/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_537.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_537.sh new file mode 100644 index 0000000000000000000000000000000000000000..44bd4469495732e465d9bc2c5b6839519991a3f2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_537.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M 
+#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer537/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer537/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_567.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_567.sh new file mode 100644 index 0000000000000000000000000000000000000000..c00f2edafd5a3d672ebc3cf7ae882b74f4b6c0fc --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_567.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer567/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer567/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_57.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_57.sh new file mode 100644 index 0000000000000000000000000000000000000000..8f37ad56d0b0d2f531caf8733270145ca202e2b7 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_57.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer57/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaf \ + --output-prefix $FINAL_DIR/tokenizer57/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_577.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_577.sh new file mode 100644 index 
0000000000000000000000000000000000000000..534ff1b83de81ab086e2d8a66ae44a6cc87d5d30
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_577.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer577/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitac \
+ --output-prefix $FINAL_DIR/tokenizer577/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_581.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_581.sh
new file mode 100644
index 0000000000000000000000000000000000000000..42316ff51da897dc81c7d315ef4de68bacace611
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_581.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer581/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer581/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_590.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_590.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3a9b460858f64145d4b3504ddcea748d6d4e1f11
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_590.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitae_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer590/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer590/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_599.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_599.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0928a47dc059e18bceabc762df1eed5febeda481
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_599.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaa_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer599/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer599/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_602.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_602.sh
new file mode 100644
index 0000000000000000000000000000000000000000..86692eebacce4b7595589625f9cfdeade46110eb
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_602.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer602/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer602/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_609.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_609.sh
new file mode 100644
index 0000000000000000000000000000000000000000..871e0441e8f4a42ce34d8bad47f88a47d00b383e
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_609.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer609/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer609/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_627.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_627.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e27a136148c5106c3e67357ab4929523ae316a3c
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_627.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer627/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitab_splitae \
+ --output-prefix $FINAL_DIR/tokenizer627/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_628.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_628.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5a9bd40848b65c913617ea658cbc91c180e5beb1
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_628.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer628/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitac_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer628/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_63.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_63.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b00df1b3bd126dd2aeee87c74af7874ec4c3808e
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_63.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer63/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer63/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_634.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_634.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bfff4192b876599a907f00ed1b50db153aa9ef0f
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_634.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer634/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbd_splitae \
+ --output-prefix $FINAL_DIR/tokenizer634/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_660.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_660.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c21bfee9e7681f3ceb1be86aca51d7ef17c4d5a4
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_660.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer660/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitac \
+ --output-prefix $FINAL_DIR/tokenizer660/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_661.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_661.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f273f4c7fc8b7e0911aad58e4cd170ed2a151629
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_661.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer661/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitaa_splitad \
+ --output-prefix $FINAL_DIR/tokenizer661/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_670.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_670.sh
new file mode 100644
index 0000000000000000000000000000000000000000..615718824c8d6f2b5514c290640edcf04dc06941
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_670.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer670/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer670/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_673.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_673.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4840ee94d37d6d1f13156ef67e7fb51ac6631f95
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_673.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer673/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitad_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer673/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_674.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_674.sh
new file mode 100644
index 0000000000000000000000000000000000000000..26bbabdec1ff8ca578792eaae82c01749e31dc80
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_674.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer674/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitad_splitab \
+ --output-prefix $FINAL_DIR/tokenizer674/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_682.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_682.sh
new file mode 100644
index 0000000000000000000000000000000000000000..40930ff1e85aaffabfa9cfd4efe939b1064f1898
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_682.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitae_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer682/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer682/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_684.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_684.sh
new file mode 100644
index 0000000000000000000000000000000000000000..99e7f45265074e9cab279d7d7646cce10c2253ba
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_684.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer684/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer684/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_687.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_687.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3ca1f96cc9568217c532ba41e8027a4341a1ef4d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_687.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer687/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer687/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_696.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_696.sh
new file mode 100644
index 0000000000000000000000000000000000000000..31ca77e55bf34dceb73475b2147653181ad7a9c1
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_696.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitae_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer696/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer696/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_703.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_703.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1102cfd884a1afeb0501a57c375cf40dd63a51cb
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_703.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer703/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer703/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_714.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_714.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9014d117ed975c8c89cbf36081341bb34b40a822
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_714.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitac_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer714/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer714/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_726.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_726.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a6776e8cce95ebd0eaa464e25c3e5c3e8eb09ef1
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_726.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitae_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer726/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitae_splitae \
+ --output-prefix $FINAL_DIR/tokenizer726/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_732.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_732.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fa9233b4195560894f47107eaa7b1e22d10ddc44
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_732.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer732/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaa_splitae \
+ --output-prefix $FINAL_DIR/tokenizer732/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_742.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_742.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7626fd6e6e6b3c58f96b40f08cbf99e09c43533d
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_742.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer742/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbi_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer742/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_751.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_751.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5c11a5e25d64d7151d7d19675a25c89a238f55ad
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_751.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer751/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitab_splitad \
+ --output-prefix $FINAL_DIR/tokenizer751/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_754.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_754.sh
new file mode 100644
index 0000000000000000000000000000000000000000..96a3a355b450f036243952faab6fbafe3c6630bf
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_754.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer754/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer754/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_756.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_756.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a2f08741a429a96565cc9cb6ef541204bcbdcf39
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_756.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer756/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitac_splitad \
+ --output-prefix $FINAL_DIR/tokenizer756/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_760.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_760.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0a41e8f7473d692644ee8956c2dff16d38efa3d4
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_760.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer760/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitac \
+ --output-prefix $FINAL_DIR/tokenizer760/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_775.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_775.sh
new file mode 100644
index 0000000000000000000000000000000000000000..74105279e8a7ba9f7291a592902df907b7809620
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_775.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer775/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitab \
+ --output-prefix $FINAL_DIR/tokenizer775/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_78.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_78.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3c04190f7a0fe07bb853616df4cf2d263a136273
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_78.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitae_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer78/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitae_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer78/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_781.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_781.sh
new file mode 100644
index 0000000000000000000000000000000000000000..db0bb2ac5009f5925aec789751b7e93eb54688b3
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_781.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer781/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitac_splitac \
+ --output-prefix $FINAL_DIR/tokenizer781/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_796.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_796.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8be339294bb22d58e3c9f1c3e0f304b779f4f17a
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_796.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaa_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer796/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaa_splitab \
+ --output-prefix $FINAL_DIR/tokenizer796/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_816.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_816.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e2d9e57945fa03bd5843e174443024a4f66ab535
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_816.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer816/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbl_splitaf \
+ --output-prefix $FINAL_DIR/tokenizer816/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_817.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_817.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0254ad3532e20991db458c3cc88a397ed225c9e8
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_817.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer817/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer817/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_828.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_828.sh
new file mode 100644
index 0000000000000000000000000000000000000000..161ff2163c60674023b2b42d52c630debba21330
--- /dev/null
+++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_828.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer828/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitac_splitab \
+ --output-prefix $FINAL_DIR/tokenizer828/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+