add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
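Together, special_tokens_map.json and tokenizer_config.json give the CTC tokenizer its special tokens and its word-delimiter behaviour. A minimal sketch of loading the files with transformers (assuming all three sit in a local directory, here hypothetically named ./urdu-tokenizer):

    from transformers import Wav2Vec2CTCTokenizer

    # Reads vocab.json, tokenizer_config.json and special_tokens_map.json
    # from the directory; the class matches "tokenizer_class" above.
    tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./urdu-tokenizer")

    print(tokenizer.unk_token)             # "[UNK]"
    print(tokenizer.pad_token)             # "[PAD]"
    print(tokenizer.word_delimiter_token)  # "|"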
vocab.json
ADDED
@@ -0,0 +1 @@
+{"ت": 0, "ذ": 1, "ق": 2, "ف": 3, "ئ": 4, "ٹ": 5, "ظ": 6, "ڈ": 7, "ک": 8, "ج": 9, "ا": 10, "ص": 11, "ھ": 12, "ہ": 13, "م": 14, "ر": 15, "ح": 16, "چ": 17, "ی": 18, "ء": 19, "گ": 20, "ے": 21, "آ": 22, "و": 23, "ڑ": 24, "ں": 25, "ض": 26, "پ": 27, "ل": 28, " ": 29, "ب": 30, "خ": 31, "س": 32, "ن": 33, "ؤ": 34, "ث": 35, "ط": 36, "ژ": 37, "غ": 38, "ش": 39, "ز": 40, "د": 41, "ع": 42, "[UNK]": 43, "[PAD]": 44}
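vocab.json maps each character of the Arabic-script alphabet used for Urdu (plus space, [UNK] and [PAD]) to a CTC class id, so the model's output head has 45 classes. A rough sketch of a greedy CTC decode over those ids, assuming the usual Wav2Vec2 fine-tuning convention that the pad token doubles as the CTC blank:

    import json

    with open("vocab.json", encoding="utf-8") as f:
        vocab = json.load(f)                      # character -> class id
    id_to_char = {i: c for c, i in vocab.items()}

    def greedy_ctc_decode(ids):
        # Collapse repeated ids, drop the blank ([PAD]), join the rest.
        out, prev = [], None
        for i in ids:
            if i != prev and id_to_char[i] != "[PAD]":
                out.append(id_to_char[i])
            prev = i
        return "".join(out)

    # Hypothetical frame ids; collapses to "می ا".
    print(greedy_ctc_decode([14, 14, 44, 18, 29, 10]))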