Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- dravid/tok_dravid/tok_files/tokenizer_103.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_104.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_106.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_107.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_109.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_11.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_112.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_113.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_115.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_116.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_119.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_120.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_125.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_126.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_13.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_15.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_16.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_17.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_20.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_21.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_23.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_25.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_26.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_29.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_31.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_32.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_36.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_38.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_48.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_50.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_51.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_57.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_58.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_59.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_6.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_62.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_63.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_69.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_73.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_75.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_76.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_78.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_79.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_85.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_86.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_87.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_89.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_9.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_90.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_91.sh +25 -0
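Every script in this commit follows the same per-shard template: it rewrites the raw_content field name to text in one split file with sed, then runs Megatron-DeepSpeed's tools/preprocess_data.py on that shard with the shared dravid_64k SentencePiece model, writing mmap output under a per-shard tokenizerNN/ prefix. Because the shards are independent, a driver could launch them with bounded parallelism; a minimal sketch follows (hypothetical, not part of this commit: the glob path and the MAX_JOBS=8 cap are assumptions).

#!/bin/bash
# Hypothetical driver (assumption, not part of this commit): run every
# per-shard tokenizer script with a bounded number of concurrent jobs.
MAX_JOBS=8   # assumption: tune to the node's CPU and I/O budget

for script in dravid/tok_dravid/tok_files/tokenizer_*.sh; do
    bash "$script" &
    # Throttle: while the running-job count is at the cap, wait for one to end.
    while [ "$(jobs -rp | wc -l)" -ge "$MAX_JOBS" ]; do
        wait -n   # bash >= 4.3
    done
done
wait   # drain the final batch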
dravid/tok_dravid/tok_files/tokenizer_103.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer103/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitab \
+--output-prefix $FINAL_DIR/tokenizer103/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_104.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer104/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitaa \
+--output-prefix $FINAL_DIR/tokenizer104/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_106.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer106/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitaa \
+--output-prefix $FINAL_DIR/tokenizer106/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_107.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer107/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcb_splitab \
+--output-prefix $FINAL_DIR/tokenizer107/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_109.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer109/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitab \
+--output-prefix $FINAL_DIR/tokenizer109/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_11.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer11/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitab \
+--output-prefix $FINAL_DIR/tokenizer11/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_112.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer112/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitaa \
+--output-prefix $FINAL_DIR/tokenizer112/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_113.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer113/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalce_splitab \
+--output-prefix $FINAL_DIR/tokenizer113/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_115.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer115/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitab \
+--output-prefix $FINAL_DIR/tokenizer115/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_116.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer116/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitaa \
+--output-prefix $FINAL_DIR/tokenizer116/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_119.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer119/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitab \
+--output-prefix $FINAL_DIR/tokenizer119/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_120.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer120/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitaa \
+--output-prefix $FINAL_DIR/tokenizer120/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_125.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer125/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitab \
+--output-prefix $FINAL_DIR/tokenizer125/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_126.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcl
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer126/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcl \
+--output-prefix $FINAL_DIR/tokenizer126/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_13.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer13/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitab \
+--output-prefix $FINAL_DIR/tokenizer13/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_15.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer15/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitab \
+--output-prefix $FINAL_DIR/tokenizer15/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_16.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer16/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitaa \
+--output-prefix $FINAL_DIR/tokenizer16/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_17.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer17/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalai_splitab \
+--output-prefix $FINAL_DIR/tokenizer17/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_20.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer20/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitaa \
+--output-prefix $FINAL_DIR/tokenizer20/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_21.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer21/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalak_splitab \
+--output-prefix $FINAL_DIR/tokenizer21/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_23.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer23/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitab \
+--output-prefix $FINAL_DIR/tokenizer23/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_25.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer25/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitab \
+--output-prefix $FINAL_DIR/tokenizer25/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_26.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer26/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitaa \
+--output-prefix $FINAL_DIR/tokenizer26/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_29.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer29/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitab \
+--output-prefix $FINAL_DIR/tokenizer29/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_31.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer31/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitab \
+--output-prefix $FINAL_DIR/tokenizer31/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_32.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer32/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitaa \
+--output-prefix $FINAL_DIR/tokenizer32/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_36.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer36/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitaa \
+--output-prefix $FINAL_DIR/tokenizer36/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_38.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer38/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitaa \
+--output-prefix $FINAL_DIR/tokenizer38/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_48.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer48/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitaa \
+--output-prefix $FINAL_DIR/tokenizer48/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_50.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer50/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitaa \
+--output-prefix $FINAL_DIR/tokenizer50/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_51.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer51/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaz_splitab \
+--output-prefix $FINAL_DIR/tokenizer51/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_57.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer57/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitab \
+--output-prefix $FINAL_DIR/tokenizer57/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_58.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer58/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitaa \
+--output-prefix $FINAL_DIR/tokenizer58/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_59.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer59/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbd_splitab \
+--output-prefix $FINAL_DIR/tokenizer59/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_6.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalad_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer6/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalad_splitaa \
+--output-prefix $FINAL_DIR/tokenizer6/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_62.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer62/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitaa \
+--output-prefix $FINAL_DIR/tokenizer62/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_63.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer63/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbf_splitab \
+--output-prefix $FINAL_DIR/tokenizer63/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_69.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbi_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer69/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbi_splitab \
+--output-prefix $FINAL_DIR/tokenizer69/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_73.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbk_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer73/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbk_splitab \
+--output-prefix $FINAL_DIR/tokenizer73/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_75.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbl_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer75/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbl_splitab \
+--output-prefix $FINAL_DIR/tokenizer75/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_76.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbm_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer76/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbm_splitaa \
+--output-prefix $FINAL_DIR/tokenizer76/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_78.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer78/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitaa \
+--output-prefix $FINAL_DIR/tokenizer78/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_79.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer79/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbn_splitab \
+--output-prefix $FINAL_DIR/tokenizer79/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_85.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbq_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer85/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbq_splitab \
+--output-prefix $FINAL_DIR/tokenizer85/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_86.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer86/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitaa \
+--output-prefix $FINAL_DIR/tokenizer86/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_87.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer87/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbr_splitab \
+--output-prefix $FINAL_DIR/tokenizer87/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_89.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbs_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer89/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbs_splitab \
+--output-prefix $FINAL_DIR/tokenizer89/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_9.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalae_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer9/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalae_splitab \
+--output-prefix $FINAL_DIR/tokenizer9/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_90.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer90/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitaa \
+--output-prefix $FINAL_DIR/tokenizer90/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_91.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer91/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+--input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbt_splitab \
+--output-prefix $FINAL_DIR/tokenizer91/ \
+--tokenizer-model $TOKENIZER \
+--vocab-file $VOCAB_FILE \
+--dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+--append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
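Since each per-shard script is self-contained, the whole set can be launched in parallel; a minimal driver sketch (the glob, working directory, and job count of 4 are assumptions, not part of this repo):

#!/bin/bash
# Hypothetical driver: run every per-shard tokenizer script, 4 at a time.
# -n 1 passes one script path per bash invocation; tune -P to what the
# node can sustain, since each job runs preprocess_data.py with 8 workers.
ls dravid/tok_dravid/tok_files/tokenizer_*.sh | xargs -n 1 -P 4 bash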