applied-ai-018 committed
Commit 74f5d17 · verified · 1 parent: 1b29b5f

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. dravid/tok_dravid/tok_files/tokenizer_0.sh +25 -0
  2. dravid/tok_dravid/tok_files/tokenizer_1.sh +25 -0
  3. dravid/tok_dravid/tok_files/tokenizer_10.sh +25 -0
  4. dravid/tok_dravid/tok_files/tokenizer_100.sh +25 -0
  5. dravid/tok_dravid/tok_files/tokenizer_101.sh +25 -0
  6. dravid/tok_dravid/tok_files/tokenizer_102.sh +25 -0
  7. dravid/tok_dravid/tok_files/tokenizer_105.sh +25 -0
  8. dravid/tok_dravid/tok_files/tokenizer_108.sh +25 -0
  9. dravid/tok_dravid/tok_files/tokenizer_110.sh +25 -0
  10. dravid/tok_dravid/tok_files/tokenizer_111.sh +25 -0
  11. dravid/tok_dravid/tok_files/tokenizer_114.sh +25 -0
  12. dravid/tok_dravid/tok_files/tokenizer_117.sh +25 -0
  13. dravid/tok_dravid/tok_files/tokenizer_118.sh +25 -0
  14. dravid/tok_dravid/tok_files/tokenizer_12.sh +25 -0
  15. dravid/tok_dravid/tok_files/tokenizer_121.sh +25 -0
  16. dravid/tok_dravid/tok_files/tokenizer_122.sh +25 -0
  17. dravid/tok_dravid/tok_files/tokenizer_123.sh +25 -0
  18. dravid/tok_dravid/tok_files/tokenizer_124.sh +25 -0
  19. dravid/tok_dravid/tok_files/tokenizer_14.sh +25 -0
  20. dravid/tok_dravid/tok_files/tokenizer_18.sh +25 -0
  21. dravid/tok_dravid/tok_files/tokenizer_19.sh +25 -0
  22. dravid/tok_dravid/tok_files/tokenizer_2.sh +25 -0
  23. dravid/tok_dravid/tok_files/tokenizer_22.sh +25 -0
  24. dravid/tok_dravid/tok_files/tokenizer_24.sh +25 -0
  25. dravid/tok_dravid/tok_files/tokenizer_27.sh +25 -0
  26. dravid/tok_dravid/tok_files/tokenizer_28.sh +25 -0
  27. dravid/tok_dravid/tok_files/tokenizer_3.sh +25 -0
  28. dravid/tok_dravid/tok_files/tokenizer_30.sh +25 -0
  29. dravid/tok_dravid/tok_files/tokenizer_33.sh +25 -0
  30. dravid/tok_dravid/tok_files/tokenizer_34.sh +25 -0
  31. dravid/tok_dravid/tok_files/tokenizer_35.sh +25 -0
  32. dravid/tok_dravid/tok_files/tokenizer_37.sh +25 -0
  33. dravid/tok_dravid/tok_files/tokenizer_39.sh +25 -0
  34. dravid/tok_dravid/tok_files/tokenizer_4.sh +25 -0
  35. dravid/tok_dravid/tok_files/tokenizer_40.sh +25 -0
  36. dravid/tok_dravid/tok_files/tokenizer_41.sh +25 -0
  37. dravid/tok_dravid/tok_files/tokenizer_42.sh +25 -0
  38. dravid/tok_dravid/tok_files/tokenizer_43.sh +25 -0
  39. dravid/tok_dravid/tok_files/tokenizer_44.sh +25 -0
  40. dravid/tok_dravid/tok_files/tokenizer_45.sh +25 -0
  41. dravid/tok_dravid/tok_files/tokenizer_46.sh +25 -0
  42. dravid/tok_dravid/tok_files/tokenizer_47.sh +25 -0
  43. dravid/tok_dravid/tok_files/tokenizer_49.sh +25 -0
  44. dravid/tok_dravid/tok_files/tokenizer_5.sh +25 -0
  45. dravid/tok_dravid/tok_files/tokenizer_52.sh +25 -0
  46. dravid/tok_dravid/tok_files/tokenizer_53.sh +25 -0
  47. dravid/tok_dravid/tok_files/tokenizer_54.sh +25 -0
  48. dravid/tok_dravid/tok_files/tokenizer_55.sh +25 -0
  49. dravid/tok_dravid/tok_files/tokenizer_56.sh +25 -0
  50. dravid/tok_dravid/tok_files/tokenizer_60.sh +25 -0
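
All 50 scripts in this commit are instances of one template: each rewrites the raw_content field to text in a single input split with sed, then runs Megatron-DeepSpeed's tools/preprocess_data.py over that split with the shared dravid_64k SentencePiece tokenizer, writing into its own tokenizerN output directory. Only the numeric index and the finalXX_splitYY input file vary from script to script. As a minimal sketch of how such a family of scripts could be produced from the template, assuming a hypothetical gen_tok_scripts.sh and a SPLITS array listing the split names in index order (neither is part of this commit):

#!/bin/bash
# gen_tok_scripts.sh -- hypothetical generator, not part of this commit.
# Emits one tokenizer_<i>.sh per input split from the shared template.
SPLITS=(finalaa_splitaa finalaa_splitab finalab_splitaa)  # extend with the real split list
for i in "${!SPLITS[@]}"; do
  split="${SPLITS[$i]}"
  # Unquoted heredoc delimiter: ${split}/${i} expand now; \$ stays literal
  # for the generated script; \\ at end of line emits a literal backslash.
  cat > "tokenizer_${i}.sh" <<EOF
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/${split}
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
TOKENIZER=\$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
VOCAB_FILE=\$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p \$FINAL_DIR/tokenizer${i}/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
python3 tools/preprocess_data.py \\
    --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/${split} \\
    --output-prefix \$FINAL_DIR/tokenizer${i}/ \\
    --tokenizer-model \$TOKENIZER \\
    --vocab-file \$VOCAB_FILE \\
    --dataset-impl mmap --tokenizer-type \$TOKENIZER_TYPE \\
    --append-eod --workers 8
EOF
  chmod +x "tokenizer_${i}.sh"
done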
dravid/tok_dravid/tok_files/tokenizer_0.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaa_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer0/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaa_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer0/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
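
Since set -m turns on job control, each script runs its preprocessing job in its own process group, and the 50 scripts are independent of one another, so the splits can be tokenized concurrently. A minimal launcher sketch, assuming the scripts live in tok_files/ and that bash 4.3+ is available for wait -n (the launcher run_tokenizers.sh is hypothetical and not part of this commit):

#!/bin/bash
# run_tokenizers.sh -- hypothetical launcher, not part of this commit.
# Runs the per-split scripts with at most MAX_JOBS in flight at once.
MAX_JOBS=8
for f in tok_files/tokenizer_*.sh; do
  bash "$f" &
  # Throttle: block until a slot frees up.
  while [ "$(jobs -rp | wc -l)" -ge "$MAX_JOBS" ]; do
    wait -n   # wait for any one job to finish (bash >= 4.3)
  done
done
wait          # wait for the stragglers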
dravid/tok_dravid/tok_files/tokenizer_1.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaa_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer1/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaa_splitab \
+     --output-prefix $FINAL_DIR/tokenizer1/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_10.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaf_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer10/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaf_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer10/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_100.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalby_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer100/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalby_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer100/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_101.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalby_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer101/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalby_splitab \
+     --output-prefix $FINAL_DIR/tokenizer101/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_102.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbz_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer102/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalbz_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer102/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_105.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalca_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer105/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalca_splitab \
+     --output-prefix $FINAL_DIR/tokenizer105/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_108.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcc_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer108/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcc_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer108/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_110.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcd_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer110/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcd_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer110/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_111.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcd_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer111/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcd_splitab \
+     --output-prefix $FINAL_DIR/tokenizer111/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_114.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcf_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer114/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcf_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer114/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_117.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcg_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer117/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcg_splitab \
+     --output-prefix $FINAL_DIR/tokenizer117/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_118.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalch_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer118/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalch_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer118/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_12.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalag_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer12/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalag_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer12/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_121.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalci_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer121/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalci_splitab \
+     --output-prefix $FINAL_DIR/tokenizer121/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_122.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcj_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer122/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcj_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer122/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_123.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcj_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer123/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalcj_splitab \
+     --output-prefix $FINAL_DIR/tokenizer123/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_124.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalck_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer124/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalck_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer124/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_14.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalah_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer14/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalah_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer14/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_18.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaj_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer18/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaj_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer18/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_19.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaj_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer19/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaj_splitab \
+     --output-prefix $FINAL_DIR/tokenizer19/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_2.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalab_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer2/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalab_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer2/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_22.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalal_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer22/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalal_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer22/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_24.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalam_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer24/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalam_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer24/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_27.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalan_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer27/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalan_splitab \
+     --output-prefix $FINAL_DIR/tokenizer27/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_28.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalao_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer28/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalao_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer28/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_3.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalab_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer3/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalab_splitab \
+     --output-prefix $FINAL_DIR/tokenizer3/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_30.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalap_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer30/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalap_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer30/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_33.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaq_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer33/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaq_splitab \
+     --output-prefix $FINAL_DIR/tokenizer33/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_34.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalar_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer34/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalar_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer34/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_35.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalar_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer35/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalar_splitab \
+     --output-prefix $FINAL_DIR/tokenizer35/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_37.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalas_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer37/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalas_splitab \
+     --output-prefix $FINAL_DIR/tokenizer37/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_39.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalat_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer39/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalat_splitab \
+     --output-prefix $FINAL_DIR/tokenizer39/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_4.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer4/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer4/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_40.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalau_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer40/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalau_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer40/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_41.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalau_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer41/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalau_splitab \
+     --output-prefix $FINAL_DIR/tokenizer41/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_42.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalav_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer42/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalav_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer42/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_43.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalav_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer43/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalav_splitab \
+     --output-prefix $FINAL_DIR/tokenizer43/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_44.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaw_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer44/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaw_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer44/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_45.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaw_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer45/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalaw_splitab \
+     --output-prefix $FINAL_DIR/tokenizer45/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_46.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer46/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitaa \
+     --output-prefix $FINAL_DIR/tokenizer46/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_47.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer47/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalax_splitab \
+     --output-prefix $FINAL_DIR/tokenizer47/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_49.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalay_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer49/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalay_splitab \
+     --output-prefix $FINAL_DIR/tokenizer49/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_5.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer5/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed || exit 1
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+     --input /mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split/finalac_splitab \
+     --output-prefix $FINAL_DIR/tokenizer5/ \
+     --tokenizer-model $TOKENIZER \
+     --vocab-file $VOCAB_FILE \
+     --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+     --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_52.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer52/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer52/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_53.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer53/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalba_splitab \
+ --output-prefix $FINAL_DIR/tokenizer53/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_54.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer54/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer54/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_55.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitab
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer55/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbb_splitab \
+ --output-prefix $FINAL_DIR/tokenizer55/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_56.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer56/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbc_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer56/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
dravid/tok_dravid/tok_files/tokenizer_60.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/bash
+ set -m
+ sed -i -e "s/raw_content/text/g" /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbe_splitaa
+ echo "above deepspeed"
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
+ #TOKENIZER=facebook/nllb-200-distilled-600M
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
+ TOKENIZER=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
+ VOCAB_FILE=$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
+ TOKENIZER_TYPE=SentencePieceTokenizer
+ mkdir -p $FINAL_DIR/tokenizer60/
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
+ echo "inside deepspeed"
+ pwd
+ python3 tools/preprocess_data.py \
+ --input /mnt/weka/peacock/idc/datasets/dravid//tok_dravid/split//finalbe_splitaa \
+ --output-prefix $FINAL_DIR/tokenizer60/ \
+ --tokenizer-model $TOKENIZER \
+ --vocab-file $VOCAB_FILE \
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE \
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
+
+ # --merge-file $MERGES_FILE \
+
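
Note on the pattern: every tokenizer_N.sh in this commit is identical except for two values, the input split file and the numbered output directory, so the whole set can be produced mechanically. The sketch below is a hypothetical generator, not part of the commit; it assumes the split files match the glob final??_split?? and that script numbers simply follow glob order, which is not necessarily the number-to-file mapping used above.

#!/bin/bash
# Hypothetical generator for the tokenizer_N.sh scripts in this commit.
# Assumption (not taken from the commit): one script per split file,
# numbered in glob order.
SPLIT_DIR=/mnt/weka/peacock/idc/datasets/dravid/tok_dravid/split
OUT=tok_files
mkdir -p "$OUT"
n=0
for split in "$SPLIT_DIR"/final??_split??; do
  # Unquoted heredoc: $split and ${n} expand now; \$VAR and \\ survive
  # literally into the generated script.
  cat > "$OUT/tokenizer_${n}.sh" <<EOF
#!/bin/bash
set -m
sed -i -e "s/raw_content/text/g" $split
FINAL_DIR=/mnt/weka/peacock/idc/hineng/dravid/
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/dravid_64k/
TOKENIZER=\$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.model
VOCAB_FILE=\$TOKENIZER_PATH/ta_te_kan_ml_50kspm_tokenizer.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
mkdir -p \$FINAL_DIR/tokenizer${n}/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \\
    --input $split \\
    --output-prefix \$FINAL_DIR/tokenizer${n}/ \\
    --tokenizer-model \$TOKENIZER \\
    --vocab-file \$VOCAB_FILE \\
    --dataset-impl mmap --tokenizer-type \$TOKENIZER_TYPE \\
    --append-eod --workers 8
EOF
  chmod +x "$OUT/tokenizer_${n}.sh"
  n=$((n + 1))
done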