add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
+{"ف": 0, "ص": 1, "ٹ": 2, "ذ": 3, "ب": 4, "ڈ": 5, "ہ": 6, "آ": 7, "م": 8, "ئ": 9, "ے": 10, "ح": 11, "س": 12, "ر": 13, "ت": 14, "ظ": 15, "ھ": 16, " ": 17, "ع": 18, "د": 19, "ز": 20, "گ": 21, "ژ": 22, "ل": 23, "ا": 24, "ق": 25, "خ": 26, "غ": 27, "ط": 28, "و": 29, "ث": 30, "ء": 31, "ک": 32, "ں": 33, "ج": 34, "ش": 35, "ن": 36, "ڑ": 37, "چ": 38, "پ": 39, "ی": 40, "ض": 41, "ؤ": 42, "[UNK]": 43, "[PAD]": 44}
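
Together these three files define a character-level Wav2Vec2CTCTokenizer for Urdu: vocab.json maps each character (plus [UNK] and [PAD]) to a CTC label id, special_tokens_map.json declares the special tokens, and tokenizer_config.json records the tokenizer class and word-delimiter settings. A minimal sketch of loading the files with the transformers library, assuming they sit in a local directory ./tokenizer (the path is illustrative, not part of this commit):

    # Minimal sketch: load the tokenizer from the three files added in this commit.
    # The directory "./tokenizer" is an assumed local path for illustration.
    from transformers import Wav2Vec2CTCTokenizer

    tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

    # Encoding a word yields one id per character, taken from vocab.json.
    ids = tokenizer("سلام").input_ids
    print(ids)

    # Decoding maps the ids back to the original characters.
    print(tokenizer.decode(ids))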