Add files using upload-large-folder tool
- dravid/tok_dravid/tok_files/tokenizer_0.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_1.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_10.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_100.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_101.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_102.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_105.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_108.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_110.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_111.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_114.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_117.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_118.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_12.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_121.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_122.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_123.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_124.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_14.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_18.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_19.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_2.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_22.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_24.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_27.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_28.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_3.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_30.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_33.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_34.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_35.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_37.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_39.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_4.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_40.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_41.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_42.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_43.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_44.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_45.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_46.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_47.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_49.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_5.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_52.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_53.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_54.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_55.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_56.sh +25 -0
- dravid/tok_dravid/tok_files/tokenizer_60.sh +25 -0
dravid/tok_dravid/tok_files/tokenizer_0.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer0/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer0/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_1.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer1/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaa_splitab \
+    --output-prefix $FINAL_DIR/tokenizer1/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_10.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer10/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaf_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer10/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_100.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer100/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer100/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_101.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer101/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalby_splitab \
+    --output-prefix $FINAL_DIR/tokenizer101/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_102.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer102/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbz_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer102/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_105.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer105/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalca_splitab \
+    --output-prefix $FINAL_DIR/tokenizer105/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_108.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer108/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcc_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer108/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_110.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer110/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer110/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_111.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer111/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcd_splitab \
+    --output-prefix $FINAL_DIR/tokenizer111/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_114.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer114/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcf_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer114/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_117.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer117/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcg_splitab \
+    --output-prefix $FINAL_DIR/tokenizer117/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_118.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer118/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalch_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer118/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_12.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer12/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalag_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer12/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_121.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer121/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalci_splitab \
+    --output-prefix $FINAL_DIR/tokenizer121/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_122.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer122/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer122/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_123.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer123/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalcj_splitab \
+    --output-prefix $FINAL_DIR/tokenizer123/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_124.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer124/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalck_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer124/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_14.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer14/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalah_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer14/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_18.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer18/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer18/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_19.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer19/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaj_splitab \
+    --output-prefix $FINAL_DIR/tokenizer19/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_2.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer2/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer2/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_22.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer22/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalal_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer22/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_24.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer24/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalam_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer24/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_27.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer27/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalan_splitab \
+    --output-prefix $FINAL_DIR/tokenizer27/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_28.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer28/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalao_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer28/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_3.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer3/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalab_splitab \
+    --output-prefix $FINAL_DIR/tokenizer3/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_30.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer30/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalap_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer30/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_33.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer33/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaq_splitab \
+    --output-prefix $FINAL_DIR/tokenizer33/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_34.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer34/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer34/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_35.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer35/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalar_splitab \
+    --output-prefix $FINAL_DIR/tokenizer35/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_37.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer37/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalas_splitab \
+    --output-prefix $FINAL_DIR/tokenizer37/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_39.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer39/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalat_splitab \
+    --output-prefix $FINAL_DIR/tokenizer39/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_4.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer4/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer4/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_40.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer40/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer40/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_41.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer41/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalau_splitab \
+    --output-prefix $FINAL_DIR/tokenizer41/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_42.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer42/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer42/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_43.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer43/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalav_splitab \
+    --output-prefix $FINAL_DIR/tokenizer43/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_44.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer44/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer44/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_45.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer45/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalaw_splitab \
+    --output-prefix $FINAL_DIR/tokenizer45/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_46.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalax_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer46/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalax_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer46/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_47.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalax_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer47/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalax_splitab \
+    --output-prefix $FINAL_DIR/tokenizer47/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_49.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer49/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalay_splitab \
+    --output-prefix $FINAL_DIR/tokenizer49/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_5.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer5/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalac_splitab \
+    --output-prefix $FINAL_DIR/tokenizer5/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_52.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer52/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer52/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_53.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer53/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitab \
+    --output-prefix $FINAL_DIR/tokenizer53/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_54.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer54/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer54/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_55.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitab
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer55/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitab \
+    --output-prefix $FINAL_DIR/tokenizer55/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_56.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer56/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer56/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_60.sh
ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -m
+sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbe_splitaa
+echo "above deepspeed$"
+FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+#TOKENIZER=facebook/nllb-200-distilled-600M
+#TOKENIZER_TYPE=HuggingFaceTokenizer
+TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+TOKENIZER_TYPE=SentencePieceTokenizer
+mkdir -p $FINAL_DIR/tokenizer60/
+cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+echo "inside deepspeed"
+pwd
+python3 tools/preprocess_data.py \
+    --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbe_splitaa \
+    --output-prefix $FINAL_DIR/tokenizer60/ \
+    --tokenizer-model $TOKENIZER \
+    --vocab-file $VOCAB_FILE \
+    --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+    --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+# --merge-file $MERGES_FILE \
+
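
Note: every script in this batch is identical except for the input shard (final*_split*) and the numbered output directory (tokenizer<N>/), so the whole set could be regenerated from one template. Below is a minimal sketch of such a generator; the shard glob, the index-to-shard mapping, and the starting index are illustrative assumptions, not something this listing pins down.

#!/bin/bash
# Sketch: emit one per-shard preprocessing script from a single template.
# ASSUMPTION: shards are numbered from 0 in glob order; the real mapping
# between script indices and final*_split* names is not shown above.
SPLIT_DIR=/mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k
i=0
for shard in "$SPLIT_DIR"/final??_split??; do
  # Unquoted heredoc: $shard, $FINAL_DIR, ${i} expand now, so each
  # generated script carries its own baked-in paths; \\ leaves a
  # literal backslash (line continuation) in the output.
  cat > "tokenizer_${i}.sh" <<EOF
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" $shard
mkdir -p $FINAL_DIR/tokenizer${i}/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \\
    --input $shard \\
    --output-prefix $FINAL_DIR/tokenizer${i}/ \\
    --tokenizer-model $TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model \\
    --vocab-file $TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab \\
    --dataset-impl mmap --tokenizer-type SentencePieceTokenizer \\
    --append-eod --workers 8
EOF
  chmod +x "tokenizer_${i}.sh"
  i=$((i + 1))
done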