Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_106.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_111.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_121.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_139.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_142.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_160.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_161.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_170.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_175.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_200.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_201.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_204.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_208.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_215.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_221.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_222.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_226.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_228.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_260.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_274.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_282.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_293.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_295.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_314.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_32.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_328.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_351.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_356.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_358.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_36.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_364.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_365.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_379.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_391.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_399.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_428.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_452.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_460.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_484.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_493.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_497.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_498.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_51.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_524.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_525.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_526.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_528.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_538.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_547.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_554.sh +25 -0
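All 50 scripts added here follow one template; only the input split name and the numbered output directory differ. A minimal sketch of how such a batch could be generated, assuming a hypothetical template.sh with @IDX@/@SPLIT@ placeholders and a splits.txt mapping file (neither is part of this commit):

#!/bin/bash
# Hypothetical generator for the per-split tokenizer scripts in this commit.
# splits.txt lines would look like: "106 finalae_splitae_splitac"
while read -r idx split; do
  sed -e "s/@IDX@/${idx}/g" -e "s/@SPLIT@/${split}/g" template.sh \
    > "tok_files/tokenizer_${idx}.sh"
done < splits.txt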
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_106.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer106/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer106/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
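Each script rewrites the raw_content key to text in the input JSONL in place (Megatron-DeepSpeed's preprocess_data.py reads the text field by default), then tokenizes the split into a memory-mapped .bin/.idx pair. A quick sanity check after running one script; because --output-prefix ends in a slash, the outputs should land inside the numbered directory (exact file names depend on the preprocess_data.py version):

bash hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_106.sh
ls /mnt/weka/peacock/idc/hineng/tok_en-bn-hn/tokenizer106/
# expected: something like _text_document.bin and _text_document.idx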
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_111.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer111/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitaa_splitab \
+    --output-prefix $FINAL_DIR/tokenizer111/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_121.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer121/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitad \
+    --output-prefix $FINAL_DIR/tokenizer121/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_139.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer139/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitad_splitae \
+    --output-prefix $FINAL_DIR/tokenizer139/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_142.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer142/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer142/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_160.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer160/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer160/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_161.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer161/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitad \
+    --output-prefix $FINAL_DIR/tokenizer161/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_170.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer170/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitac_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer170/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_175.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer175/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitad_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer175/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_200.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer200/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitac_splitae \
+    --output-prefix $FINAL_DIR/tokenizer200/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_201.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer201/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitad \
+    --output-prefix $FINAL_DIR/tokenizer201/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_204.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer204/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer204/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_208.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer208/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitaa_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer208/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_215.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer215/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitab \
+    --output-prefix $FINAL_DIR/tokenizer215/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_221.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer221/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitac \
+    --output-prefix $FINAL_DIR/tokenizer221/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_222.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer222/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitad_splitad \
+    --output-prefix $FINAL_DIR/tokenizer222/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_226.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer226/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer226/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_228.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer228/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitae_splitae \
+    --output-prefix $FINAL_DIR/tokenizer228/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_260.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer260/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitab \
+    --output-prefix $FINAL_DIR/tokenizer260/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_274.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer274/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitad \
+    --output-prefix $FINAL_DIR/tokenizer274/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_282.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer282/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitad_splitab \
+    --output-prefix $FINAL_DIR/tokenizer282/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_293.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer293/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer293/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_295.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer295/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitab_splitac \
+    --output-prefix $FINAL_DIR/tokenizer295/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_314.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer314/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitaa_splitae \
+    --output-prefix $FINAL_DIR/tokenizer314/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_32.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer32/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitac_splitae \
+    --output-prefix $FINAL_DIR/tokenizer32/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_328.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer328/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalap_splitad_splitad \
+    --output-prefix $FINAL_DIR/tokenizer328/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_351.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer351/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer351/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_356.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer356/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitae \
+    --output-prefix $FINAL_DIR/tokenizer356/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_358.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer358/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaa_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer358/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_36.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer36/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitac \
+    --output-prefix $FINAL_DIR/tokenizer36/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_364.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer364/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitab \
+    --output-prefix $FINAL_DIR/tokenizer364/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_365.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer365/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitab_splitac \
+    --output-prefix $FINAL_DIR/tokenizer365/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_379.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer379/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalar_splitaf \
+    --output-prefix $FINAL_DIR/tokenizer379/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_391.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer391/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitac_splitab \
+    --output-prefix $FINAL_DIR/tokenizer391/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_399.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer399/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalas_splitad_splitae \
+    --output-prefix $FINAL_DIR/tokenizer399/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_428.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer428/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitac \
+    --output-prefix $FINAL_DIR/tokenizer428/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_452.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer452/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitac_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer452/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_460.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer460/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitad \
+    --output-prefix $FINAL_DIR/tokenizer460/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_484.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer484/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitab \
+    --output-prefix $FINAL_DIR/tokenizer484/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_493.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer493/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitaf \
+    --output-prefix $FINAL_DIR/tokenizer493/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_497.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitab
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer497/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitab \
    --output-prefix $FINAL_DIR/tokenizer497/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_498.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitac
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer498/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalax_splitac_splitac \
    --output-prefix $FINAL_DIR/tokenizer498/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_51.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitab
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer51/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitab \
    --output-prefix $FINAL_DIR/tokenizer51/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_524.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitaa
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer524/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitaa \
    --output-prefix $FINAL_DIR/tokenizer524/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_525.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitab
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer525/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitab \
    --output-prefix $FINAL_DIR/tokenizer525/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_526.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitac
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer526/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitac \
    --output-prefix $FINAL_DIR/tokenizer526/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_528.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitae
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer528/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitae_splitae \
    --output-prefix $FINAL_DIR/tokenizer528/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_538.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitad
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer538/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitad \
    --output-prefix $FINAL_DIR/tokenizer538/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_547.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitab
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer547/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitae_splitab \
    --output-prefix $FINAL_DIR/tokenizer547/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_554.sh
ADDED
@@ -0,0 +1,25 @@
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitac
echo "above deepspeed$"
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p $FINAL_DIR/tokenizer554/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
echo "inside deepspeed"
pwd
python3 tools/preprocess_data.py \
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitac \
    --output-prefix $FINAL_DIR/tokenizer554/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
    --append-eod --workers 8 # --partitions 16 #--chunk-size 50

# --merge-file $MERGES_FILE \
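Since each script is self-contained, a batch of them can be launched with bounded parallelism; a minimal sketch, assuming the scripts live in tok_files/ and that four concurrent preprocessing jobs fit on the node (the -P 4 value is arbitrary):

# Run every tokenizer script, at most 4 at a time; the filenames contain no
# whitespace, so a newline-delimited list is safe to feed to xargs here.
printf '%s\n' tok_files/tokenizer_*.sh | xargs -n 1 -P 4 bash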