diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh new file mode 100644 index 0000000000000000000000000000000000000000..10ce55de7baaeabca048522ea4f6384a7d2ed70f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer103/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer103/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ab46cfefb730f841cb6e28f6594f512d421deae --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer107/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer107/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_109.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_109.sh new file mode 100644 index 0000000000000000000000000000000000000000..46fb81daaca3ebbd3aaf6abd001e3045bc05cd57 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_109.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer109/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside 
deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaf \ + --output-prefix $FINAL_DIR/tokenizer109/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_119.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_119.sh new file mode 100644 index 0000000000000000000000000000000000000000..d7c37e00027c9237339c9e09656f67321fcdadde --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_119.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer119/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer119/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh new file mode 100644 index 0000000000000000000000000000000000000000..6c6412d7e857d84b49c0a55308a708c8bdfff567 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer125/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer125/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh new file mode 100644 index 0000000000000000000000000000000000000000..08634e3989b1d97dd829bf41afb05f8c013e7b89 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitad +echo "above deepspeed$" 
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer150/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer150/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh new file mode 100644 index 0000000000000000000000000000000000000000..b5d12098358fbf89f835853af938372e0c0d8e70 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer157/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitad \ + --output-prefix $FINAL_DIR/tokenizer157/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh new file mode 100644 index 0000000000000000000000000000000000000000..d98ca5a1c02ebecc909db99a2e813e9c956f6316 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer159/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer159/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_164.sh 
b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_164.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d4906cbf57692046833f132c06f34c7b025899d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_164.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer164/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaa \ + --output-prefix $FINAL_DIR/tokenizer164/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4ff627797d0c44a12afe901603bc008c3d2c1be --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer168/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer168/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef25e1efda0e01144cd3d696ac5d6e695495c0b2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer169/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer169/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_193.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_193.sh new file mode 100644 index 0000000000000000000000000000000000000000..eec7e1f263b50835d197c2090f92c79194490679 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_193.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer193/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer193/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_214.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_214.sh new file mode 100644 index 0000000000000000000000000000000000000000..7bbdcf4d8b23388acddb9d53837a0b4a0685be97 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_214.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer214/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer214/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_230.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_230.sh new file mode 100644 index 0000000000000000000000000000000000000000..13929349f4eb5f8f24913c35142f05eafdf90530 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_230.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer230/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer230/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_245.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_245.sh new file mode 100644 index 0000000000000000000000000000000000000000..3fd1a1f4637f9fb305eb03a3f693e6d302cd597f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_245.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer245/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer245/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_247.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_247.sh new file mode 100644 index 0000000000000000000000000000000000000000..a8040fe349e5afc6d6fe813fb7279b79c9e1f3e2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_247.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer247/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaf \ + --output-prefix $FINAL_DIR/tokenizer247/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_250.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_250.sh new file mode 
100644 index 0000000000000000000000000000000000000000..997f603980521d07c2d0861a71808864fae23635 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_250.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer250/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer250/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_262.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_262.sh new file mode 100644 index 0000000000000000000000000000000000000000..8184d37447c888708125a220770187f8aba2d72d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_262.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer262/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer262/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_275.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_275.sh new file mode 100644 index 0000000000000000000000000000000000000000..6bad372610afef8b70a694bf84a41d5c7ead6f06 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_275.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer275/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer275/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_289.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_289.sh new file mode 100644 index 0000000000000000000000000000000000000000..ae2f846b9351ce4555ab1f1a850ecbecb1a10b60 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_289.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer289/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer289/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_299.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_299.sh new file mode 100644 index 0000000000000000000000000000000000000000..798e307267cc72655eee2c11694c4c87cc19f8b2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_299.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer299/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer299/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_308.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_308.sh new file mode 100644 index 0000000000000000000000000000000000000000..a3b4086c0f06dda2bd04e6439c50b8691fa165e9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_308.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer308/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer308/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_34.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_34.sh new file mode 100644 index 0000000000000000000000000000000000000000..fc848b33d03a1c7010eefccc6cfa79e286cf8d5e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_34.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer34/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer34/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_35.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_35.sh new file mode 100644 index 0000000000000000000000000000000000000000..c4a505c5256003989b1fd703670dd03b244edcd6 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_35.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer35/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer35/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_353.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_353.sh new file 
mode 100644 index 0000000000000000000000000000000000000000..ce0e46560161e83928f367b06c971abe0e99c82f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_353.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer353/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer353/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_427.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_427.sh new file mode 100644 index 0000000000000000000000000000000000000000..308f0c182f4aedb19f765e53e7447b0433708f7c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_427.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer427/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer427/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_44.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_44.sh new file mode 100644 index 0000000000000000000000000000000000000000..811884936aa2d315dbe85fd637e10650a65b87a8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_44.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer44/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer44/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_449.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_449.sh new file mode 100644 index 0000000000000000000000000000000000000000..86d38863b6ff1332dea51aa88e35b9e7904c1487 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_449.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer449/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer449/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_457.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_457.sh new file mode 100644 index 0000000000000000000000000000000000000000..c5a6af0e9846cb32bf9d19462d77fb4db18c95f9 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_457.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer457/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitaa \ + --output-prefix $FINAL_DIR/tokenizer457/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_463.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_463.sh new file mode 100644 index 0000000000000000000000000000000000000000..5daa3907433ecedce3c3768de8a24a3f33dd35ba --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_463.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer463/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer463/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_467.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_467.sh new file mode 100644 index 0000000000000000000000000000000000000000..57d1f2f6c3361db4d17262583d58b9dfc37293d4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_467.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaf +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer467/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaf \ + --output-prefix $FINAL_DIR/tokenizer467/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_47.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_47.sh new file mode 100644 index 0000000000000000000000000000000000000000..b016f5278ca79c9778d96a2a0ab9ab9ef7299f6d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_47.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer47/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer47/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_478.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_478.sh new file mode 100644 
index 0000000000000000000000000000000000000000..f9a99092a5857e36aa861e716c3db5ea4b251f0d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_478.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer478/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer478/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_481.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_481.sh new file mode 100644 index 0000000000000000000000000000000000000000..1a9beb1220796a196dd50358e8bbea771cef42c4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_481.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer481/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer481/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_482.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_482.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f780e1bf6c3919b793aa5d6d3cbdf04d7ed9da0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_482.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer482/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer482/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_486.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_486.sh new file mode 100644 index 0000000000000000000000000000000000000000..79325117ed3c4f5df9d7de5f31557b4a93ef86cf --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_486.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer486/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer486/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_487.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_487.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca44513f4c6d9b1ab40e4a8006006adfc723395d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_487.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer487/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer487/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_515.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_515.sh new file mode 100644 index 0000000000000000000000000000000000000000..7e8ace52dd097704313b40f131cda41c351f6a4f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_515.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer515/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer515/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_53.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_53.sh new file mode 100644 index 0000000000000000000000000000000000000000..cddb247285f321024240754408a1dd97c3224b12 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_53.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer53/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitad \ + --output-prefix $FINAL_DIR/tokenizer53/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_530.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_530.sh new file mode 100644 index 0000000000000000000000000000000000000000..7509758cc853f6db05d97d154a2b28f9ca7dfc2a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_530.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer530/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer530/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_532.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_532.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..f9cadb28d3cedde15f1fa5ff286a3d80cf37171a --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_532.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer532/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer532/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_535.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_535.sh new file mode 100644 index 0000000000000000000000000000000000000000..088ca61453bf1ab6f15619df425df2f12bf579c0 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_535.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer535/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer535/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_543.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_543.sh new file mode 100644 index 0000000000000000000000000000000000000000..1d22139a61b47c51e9903f300844b663b81eb6dd --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_543.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer543/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer543/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_556.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_556.sh new file mode 100644 index 0000000000000000000000000000000000000000..dc44fdb7adc36f1a397af04e9d98842d1652e476 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_556.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer556/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitae \ + --output-prefix $FINAL_DIR/tokenizer556/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_560.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_560.sh new file mode 100644 index 0000000000000000000000000000000000000000..236c9d761e0ba2570f8ab51a06368abdfce85108 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_560.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer560/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitad \ + --output-prefix $FINAL_DIR/tokenizer560/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_563.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_563.sh new file mode 100644 index 0000000000000000000000000000000000000000..cff90b3d01a54446839c1794743027a0f4bcef8e --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_563.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer563/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitab \ + --output-prefix $FINAL_DIR/tokenizer563/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_566.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_566.sh new file mode 100644 index 0000000000000000000000000000000000000000..19f395f23dd0a0f43f632e695c725143a89eb524 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_566.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer566/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer566/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_572.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_572.sh new file mode 100644 index 0000000000000000000000000000000000000000..d486047c77fcd39c3f6867d5a2b91bdba08309d2 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_572.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer572/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitae \ + --output-prefix $FINAL_DIR/tokenizer572/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_575.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_575.sh new file mode 
100644 index 0000000000000000000000000000000000000000..43d275458a3ecf31ea164013d14766d654774345 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_575.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer575/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer575/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_576.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_576.sh new file mode 100644 index 0000000000000000000000000000000000000000..c87513eb187e29bb2ec7c41e84afd039d99fab17 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_576.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer576/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer576/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_58.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_58.sh new file mode 100644 index 0000000000000000000000000000000000000000..06f866c32684e60ef4701e9cebde259dc7c1b20f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_58.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer58/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitaa \ + --output-prefix $FINAL_DIR/tokenizer58/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_588.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_588.sh new file mode 100644 index 0000000000000000000000000000000000000000..4066c00888c20f4fa263b6624934b3f3b69124d4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_588.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitad_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer588/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitad_splitad \ + --output-prefix $FINAL_DIR/tokenizer588/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_591.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_591.sh new file mode 100644 index 0000000000000000000000000000000000000000..2be97f5bc40cf802c1d027dedb51e82687f82272 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_591.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitae_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer591/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitae_splitab \ + --output-prefix $FINAL_DIR/tokenizer591/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_60.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_60.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d0d473c763317174f754aaa2018a5c31c717018 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_60.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer60/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitaa_splitac \ + --output-prefix $FINAL_DIR/tokenizer60/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_603.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_603.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9698a59823e8ce94a4e1b5fe88692c8fd8d11eb --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_603.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer603/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer603/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_616.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_616.sh new file mode 100644 index 0000000000000000000000000000000000000000..6592aa8ac608beea614bc70451337ffbcaf8bf85 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_616.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitae_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer616/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitae_splitaa \ + --output-prefix $FINAL_DIR/tokenizer616/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_620.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_620.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..3ce7288b9c29afb2896dd4d3f7a7d88cc759589b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_620.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer620/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbc_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer620/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_642.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_642.sh new file mode 100644 index 0000000000000000000000000000000000000000..30adb8bf44380e07f4891c14f3b69fcf74fc406c --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_642.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer642/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer642/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_646.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_646.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a99576021a45f3786871bb5293e87e8be22fa70 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_646.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer646/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer646/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_65.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_65.sh new file mode 100644 index 0000000000000000000000000000000000000000..6ece8791471459d0bde3f5bf9f37306478df9c9f --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_65.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer65/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer65/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_656.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_656.sh new file mode 100644 index 0000000000000000000000000000000000000000..2d1c1298c6a28054a28907f52da07266aa2495df --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_656.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitae_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer656/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbe_splitae_splitae \ + --output-prefix $FINAL_DIR/tokenizer656/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_668.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_668.sh new file mode 100644 index 0000000000000000000000000000000000000000..5e5c86f87a083c6f1c947c7c1fa5854ab5a0a8e4 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_668.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer668/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbf_splitac_splitaa \ + --output-prefix $FINAL_DIR/tokenizer668/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_67.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_67.sh new file mode 100644 index 0000000000000000000000000000000000000000..aa0241976e1f326b0bb982923bdbc765037466a8 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_67.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer67/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalad_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer67/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_693.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_693.sh new file mode 100644 index 0000000000000000000000000000000000000000..01598523de11fd8c197200d1f9162225d1e5e325 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_693.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer693/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitad_splitac \ + --output-prefix $FINAL_DIR/tokenizer693/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_699.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_699.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..8a1107664dd2350765a7e668c161ddb0e289a8c3 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_699.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitae_splitad +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer699/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbg_splitae_splitad \ + --output-prefix $FINAL_DIR/tokenizer699/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_7.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_7.sh new file mode 100644 index 0000000000000000000000000000000000000000..e170cb9870cef3cd73b1cc3deade53b42795a9b1 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_7.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer7/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaa_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer7/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_707.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_707.sh new file mode 100644 index 0000000000000000000000000000000000000000..9d29e0adfea9bd73af624e6da4518c26839a6d61 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_707.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer707/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer707/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_708.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_708.sh new file mode 100644 index 0000000000000000000000000000000000000000..a770c06a7858a0f63a422c1569d40f7964154804 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_708.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer708/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbh_splitab_splitab \ + --output-prefix $FINAL_DIR/tokenizer708/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_744.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_744.sh new file mode 100644 index 0000000000000000000000000000000000000000..32a5236717e6ad491bd81f1c6e0f69500fe3d890 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_744.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer744/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer744/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_762.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_762.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9a53867e10cd75b1c015ffdf19738c2fd36a010 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_762.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ 
+#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer762/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbj_splitad_splitae \ + --output-prefix $FINAL_DIR/tokenizer762/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_774.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_774.sh new file mode 100644 index 0000000000000000000000000000000000000000..1e92752b672ffda91e12319325e62eb9a82d6674 --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_774.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitaa +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer774/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitaa \ + --output-prefix $FINAL_DIR/tokenizer774/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_776.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_776.sh new file mode 100644 index 0000000000000000000000000000000000000000..a95fe2acbc237c5cafd243684782589e3b74cc0b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_776.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitac +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer776/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbk_splitab_splitac \ + --output-prefix $FINAL_DIR/tokenizer776/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_818.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_818.sh 
new file mode 100644 index 0000000000000000000000000000000000000000..e1d679e845cda85e2d8979316161a6ce7d4c745d --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_818.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitab +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer818/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbm_splitaa_splitab \ + --output-prefix $FINAL_DIR/tokenizer818/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_93.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_93.sh new file mode 100644 index 0000000000000000000000000000000000000000..38d37704bc59edb8bf55fcee66d079ff99a3fcce --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_93.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitab_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer93/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitab_splitae \ + --output-prefix $FINAL_DIR/tokenizer93/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ + diff --git a/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_98.sh b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_98.sh new file mode 100644 index 0000000000000000000000000000000000000000..8ece5d13dc4d5b3e157b5135dad23abc812a8f7b --- /dev/null +++ b/hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_98.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -m +sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitac_splitae +echo "above deepspeed$" +FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/ +#TOKENIZER=facebook/nllb-200-distilled-600M +#TOKENIZER_TYPE=HuggingFaceTokenizer +TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k +TOKENIZER=$TOKENIZER_PATH/all.model +VOCAB_FILE=$TOKENIZER_PATH/all.vocab +TOKENIZER_TYPE=SentencePieceTokenizer +mkdir -p $FINAL_DIR/tokenizer98/ +cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed +echo "inside deepspeed" +pwd +python3 tools/preprocess_data.py \ + --input 
/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitac_splitae \ + --output-prefix $FINAL_DIR/tokenizer98/ \ + --tokenizer-model $TOKENIZER \ + --vocab-file $VOCAB_FILE \ + --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ + --append-eod --workers 8 # --partitions 16 #--chunk-size 50 + + # --merge-file $MERGES_FILE \ +
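
Note (editorial, not part of the diff above): every tokenizer_*.sh file added here instantiates the same template; only the input split name and the numbered tokenizerNNN/ output directory change. A minimal consolidated sketch is shown below for reference. The script name tokenize_split.sh and the two positional parameters are assumptions made here, not something present in the repository; all paths, tokenizer settings, and preprocess_data.py flags mirror the per-split scripts above.

#!/bin/bash
# tokenize_split.sh -- hedged sketch of the shared template used by the per-split scripts.
# Hypothetical usage:  ./tokenize_split.sh <job_number> <split_name>
# e.g.:                ./tokenize_split.sh 532 finalaz_splitaa_splitac
set -euo pipefail

JOB_NUM=$1        # e.g. 532 -> output goes to tokenizer532/
SPLIT_NAME=$2     # e.g. finalaz_splitaa_splitac

SPLIT_DIR=/mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer

INPUT_FILE=$SPLIT_DIR/$SPLIT_NAME
OUTPUT_DIR=$FINAL_DIR/tokenizer$JOB_NUM

# Rename the JSON field in place so preprocess_data.py finds "text" (same sed as above).
sed -i -e "s/raw_content/text/g" "$INPUT_FILE"

mkdir -p "$OUTPUT_DIR"
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed

# Same invocation as the generated scripts: SentencePiece tokenization into mmap .bin/.idx
# files with an end-of-document token appended, using 8 worker processes.
python3 tools/preprocess_data.py \
    --input "$INPUT_FILE" \
    --output-prefix "$OUTPUT_DIR/" \
    --tokenizer-model "$TOKENIZER" \
    --vocab-file "$VOCAB_FILE" \
    --dataset-impl mmap \
    --tokenizer-type "$TOKENIZER_TYPE" \
    --append-eod \
    --workers 8

With such a wrapper, the individual tokenizer_NNN.sh files could be reduced to one-line calls (or a loop over a job list), which would avoid the copy-paste drift seen in the generated scripts; this is offered only as a sketch under the assumptions stated above.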