add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -1
- tokenizer_config.json +1 -1
- vocab.json +1 -1
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 57, "</s>": 58}
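added_tokens.json maps tokens registered on top of the base vocabulary to their ids; here <s> and </s> land at 57 and 58, directly after the 57-entry vocab.json (ids 0-56). A minimal sketch of loading the files in this commit, assuming they sit in the current directory:

from transformers import Wav2Vec2CTCTokenizer

# Loads vocab.json, tokenizer_config.json, special_tokens_map.json
# and added_tokens.json from a local checkout (the path is an assumption).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

print(tokenizer.convert_tokens_to_ids("<s>"))   # 57, from added_tokens.json
print(tokenizer.convert_tokens_to_ids("</s>"))  # 58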
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
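The change registers <s> and </s> under additional_special_tokens with explicit lstrip/rstrip/normalized flags, so they are kept atomic during tokenization and can be skipped on decode. A short sketch of the effect, under the same local-path assumption:

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# <s> and </s> appear among the special tokens...
print(tokenizer.all_special_tokens)  # ['<s>', '</s>', '[UNK]', '[PAD]']

# ...and are dropped when decoding with skip_special_tokens=True.
ids = tokenizer.convert_tokens_to_ids(["<s>", "</s>"])
print(tokenizer.decode(ids, skip_special_tokens=True))  # ""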
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
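tokenizer_config.json drives how Wav2Vec2CTCTokenizer is instantiated: "|" stands in for spaces in the CTC label alphabet, and replace_word_delimiter_char turns it back into " " on decode. A hedged round-trip sketch (the Russian sample text is illustrative):

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Spaces are encoded as the word delimiter "|" (id 24 in vocab.json)...
ids = tokenizer("привет мир").input_ids

# ...and decoded back to spaces via replace_word_delimiter_char.
print(tokenizer.decode(ids))  # "привет мир"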
vocab.json
CHANGED
@@ -1 +1 @@
-{"
+{"h": 0, "ю": 1, "ы": 2, "x": 3, "b": 4, "ё": 5, "р": 6, "f": 7, "э": 8, "n": 9, "д": 10, "m": 11, "c": 12, "ш": 13, "ч": 14, "g": 15, "а": 16, "г": 17, "ф": 18, "'": 19, "в": 20, "ж": 21, "p": 22, "k": 23, "a": 25, "м": 26, "у": 27, "о": 28, "r": 29, "ъ": 30, "з": 31, "я": 32, "н": 33, "e": 34, "ц": 35, "щ": 36, "o": 37, "l": 38, "б": 39, "е": 40, "i": 41, "с": 42, "л": 43, "ь": 44, "к": 45, "t": 46, "»": 47, "п": 48, "т": 49, "—": 50, "х": 51, "и": 52, "й": 53, "s": 54, "|": 24, "[UNK]": 55, "[PAD]": 56}
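vocab.json is a character-level vocabulary mixing Cyrillic and Latin letters, with [UNK] (55) and [PAD] (56) at the end; in the Wav2Vec2 convention [PAD] doubles as the CTC blank. A sketch of how decode collapses raw frame-level predictions, assuming that convention:

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Repeated ids are collapsed and the blank ([PAD], id 56) is dropped,
# so the frame sequence below reduces to "привет".
frames = [48, 48, 6, 52, 52, 20, 56, 40, 49]
print(tokenizer.decode(frames))  # "привет"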