Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_109.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_119.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_164.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_193.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_214.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_230.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_245.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_247.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_250.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_262.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_275.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_289.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_299.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_308.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_34.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_35.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_353.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_427.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_44.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_449.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_457.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_463.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_467.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_47.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_478.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_481.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_482.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_486.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_487.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_515.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_53.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_530.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_532.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_535.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_543.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_556.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_560.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_563.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_566.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_572.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_575.sh +25 -0
- hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_576.sh +25 -0
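Every script added in this commit follows the same 25-line template: it rewrites the `raw_content` field to `text` in one split file with `sed`, then runs Megatron-DeepSpeed's `tools/preprocess_data.py` over that split with the shared SentencePiece tokenizer, writing the binarized output under a per-script `tokenizerNNN/` prefix. The sketch below is a hypothetical generator for such a script, shown only to make the shared structure explicit; the `gen_tok_script.sh` name and its `(index, split-file)` argument convention are assumptions for illustration and are not part of this repository.

```bash
#!/bin/bash
# Hypothetical helper (not in this repo): emit one tokenizer_<N>.sh from the
# shared template, given a script index and a split file name.
# Usage: ./gen_tok_script.sh 103 finalae_splitad_splitae > tokenizer_103.sh
set -euo pipefail

IDX="$1"      # e.g. 103
SPLIT="$2"    # e.g. finalae_splitad_splitae

cat <<EOF
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//${SPLIT}
FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=\$TOKENIZER_PATH/all.model
VOCAB_FILE=\$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p \$FINAL_DIR/tokenizer${IDX}/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \\
    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//${SPLIT} \\
    --output-prefix \$FINAL_DIR/tokenizer${IDX}/ \\
    --tokenizer-model \$TOKENIZER \\
    --vocab-file \$VOCAB_FILE \\
    --dataset-impl mmap --tokenizer-type \$TOKENIZER_TYPE \\
    --append-eod --workers 8
EOF
```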
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_103.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer103/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitad_splitae \
+--output-prefix $FINAL_DIR/tokenizer103/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_107.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer107/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitae_splitad \
+--output-prefix $FINAL_DIR/tokenizer107/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_109.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer109/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalae_splitaf \
+--output-prefix $FINAL_DIR/tokenizer109/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_119.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer119/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaf_splitac_splitad \
+--output-prefix $FINAL_DIR/tokenizer119/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_125.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer125/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalag_splitab_splitaa \
+--output-prefix $FINAL_DIR/tokenizer125/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_150.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer150/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitab_splitad \
+--output-prefix $FINAL_DIR/tokenizer150/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_157.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer157/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitad \
+--output-prefix $FINAL_DIR/tokenizer157/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_159.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer159/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalah_splitae_splitab \
+--output-prefix $FINAL_DIR/tokenizer159/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_164.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer164/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitaa \
+--output-prefix $FINAL_DIR/tokenizer164/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_168.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer168/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitad \
+--output-prefix $FINAL_DIR/tokenizer168/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_169.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer169/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalai_splitab_splitae \
+--output-prefix $FINAL_DIR/tokenizer169/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_193.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer193/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaj_splitab_splitac \
+--output-prefix $FINAL_DIR/tokenizer193/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_214.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer214/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalak_splitac_splitaa \
+--output-prefix $FINAL_DIR/tokenizer214/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_230.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer230/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaa_splitaa \
+--output-prefix $FINAL_DIR/tokenizer230/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_245.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer245/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitad_splitae \
+--output-prefix $FINAL_DIR/tokenizer245/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_247.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer247/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalal_splitaf \
+--output-prefix $FINAL_DIR/tokenizer247/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_250.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer250/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitab_splitab \
+--output-prefix $FINAL_DIR/tokenizer250/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_262.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer262/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalam_splitad_splitad \
+--output-prefix $FINAL_DIR/tokenizer262/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_275.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer275/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalan_splitab_splitae \
+--output-prefix $FINAL_DIR/tokenizer275/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_289.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer289/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitaa_splitab \
+--output-prefix $FINAL_DIR/tokenizer289/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_299.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer299/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitad_splitaa \
+--output-prefix $FINAL_DIR/tokenizer299/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_308.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer308/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalao_splitae_splitae \
+--output-prefix $FINAL_DIR/tokenizer308/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_34.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer34/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitaa \
+--output-prefix $FINAL_DIR/tokenizer34/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_35.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer35/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalab_splitae_splitab \
+--output-prefix $FINAL_DIR/tokenizer35/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_353.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer353/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaq_splitad_splitac \
+--output-prefix $FINAL_DIR/tokenizer353/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_427.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer427/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalau_splitac_splitab \
+--output-prefix $FINAL_DIR/tokenizer427/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_44.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer44/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitaa_splitae \
+--output-prefix $FINAL_DIR/tokenizer44/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_449.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer449/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitab_splitac \
+--output-prefix $FINAL_DIR/tokenizer449/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_457.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer457/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitad_splitaa \
+--output-prefix $FINAL_DIR/tokenizer457/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_463.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer463/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitae_splitab \
+--output-prefix $FINAL_DIR/tokenizer463/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_467.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaf
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer467/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalav_splitaf \
+--output-prefix $FINAL_DIR/tokenizer467/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_47.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer47/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitab_splitac \
+--output-prefix $FINAL_DIR/tokenizer47/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_478.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer478/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitaa \
+--output-prefix $FINAL_DIR/tokenizer478/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_481.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer481/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitad \
+--output-prefix $FINAL_DIR/tokenizer481/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_482.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer482/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitac_splitae \
+--output-prefix $FINAL_DIR/tokenizer482/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_486.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer486/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitad \
+--output-prefix $FINAL_DIR/tokenizer486/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_487.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer487/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaw_splitad_splitae \
+--output-prefix $FINAL_DIR/tokenizer487/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_515.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer515/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalay_splitac_splitab \
+--output-prefix $FINAL_DIR/tokenizer515/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_53.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer53/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalac_splitac_splitad \
+--output-prefix $FINAL_DIR/tokenizer53/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_530.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer530/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitaa \
+--output-prefix $FINAL_DIR/tokenizer530/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_532.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer532/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitaa_splitac \
+    --output-prefix $FINAL_DIR/tokenizer532/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_535.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer535/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitab_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer535/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_543.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitac
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer543/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalaz_splitad_splitac \
+    --output-prefix $FINAL_DIR/tokenizer543/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_556.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer556/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitaa_splitae \
+    --output-prefix $FINAL_DIR/tokenizer556/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_560.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitad
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer560/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitab_splitad \
+    --output-prefix $FINAL_DIR/tokenizer560/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_563.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer563/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitab \
+    --output-prefix $FINAL_DIR/tokenizer563/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_566.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer566/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitac_splitae \
+    --output-prefix $FINAL_DIR/tokenizer566/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_572.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitae
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer572/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalba_splitae \
+    --output-prefix $FINAL_DIR/tokenizer572/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_575.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer575/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer575/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_576.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/tok_en-bn-hn/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
+TOKENIZER=$TOKENIZER_PATH/all.model
+VOCAB_FILE=$TOKENIZER_PATH/all.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer576/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/hn_eng_bn/tok_en-bn-hn/split//finalbb_splitab_splitab \
+    --output-prefix $FINAL_DIR/tokenizer576/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
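Because each script touches only its own split file and its own tokenizerNNN/ output directory, the batch can be driven by any simple job runner. A launcher sketch, assuming the scripts sit under hn_eng_bn/tok_en-bn-hn/tok_files/ in the working copy; the concurrency of 4 and the logs/ directory are illustrative choices, not values taken from the repository:

#!/bin/bash
# Sketch only: run every per-split tokenizer script with bounded parallelism,
# capturing one log per script. Adjust -P to the available CPU/IO budget.
mkdir -p logs
ls hn_eng_bn/tok_en-bn-hn/tok_files/tokenizer_*.sh \
    | xargs -P 4 -I{} sh -c 'bash "{}" > "logs/$(basename "{}" .sh).log" 2>&1'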