AkashDataScience committed
Commit 5581e99 · 1 Parent(s): 49c6fa3

Adding other tokenizers

Files changed (1)
  1. app.py +14 -5
app.py CHANGED
@@ -3,12 +3,20 @@ import random
 import gradio as gr
 from language_bpe import BPETokenizer
 
-tokenizer = BPETokenizer()
-tokenizer.load('models/english_5000.model')
+hinglish_tokenizer = BPETokenizer()
+hinglish_tokenizer.load('models/hinglish_5000.model')
 
-def inference(input_text):
-    encoding = tokenizer.encode_ordinary(input_text)
-    sentence = [tokenizer.decode([x]) for x in encoding]
+hindi_tokenizer = BPETokenizer()
+hindi_tokenizer.load('models/hindi_5000.model')
+
+english_tokenizer = BPETokenizer()
+english_tokenizer.load('models/english_5000.model')
+
+tokenizer_dict = {"Hinglish_5k": hinglish_tokenizer, "Hindi_5k": hindi_tokenizer, "English_5k": english_tokenizer}
+
+def inference(input_text, tokenizer):
+    encoding = tokenizer_dict[tokenizer].encode_ordinary(input_text)
+    sentence = [tokenizer_dict[tokenizer].decode([x]) for x in encoding]
     color_sentence = []
     color_encoding = []
     for word, encode in zip(sentence, encoding):
@@ -26,6 +34,7 @@ demo = gr.Interface(
     inference,
     inputs = [
         gr.Textbox(label="Enter any sentence in Hindi, English or both language", type="text"),
+        gr.Dropdown(label="Tokenizer", choices=["Hinglish_5k", "Hindi_5k", "English_5k"], value="Hinglish_5k")
     ],
     outputs = [
         gr.Label(label="Token count"),
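
For context, a minimal smoke-test sketch (not part of the commit) of the selection pattern the diff introduces: the dropdown value is only a key into tokenizer_dict. It assumes the language_bpe package, the three models/*_5000.model files, and the encode_ordinary/decode methods used above; the sample sentence is made up.

from language_bpe import BPETokenizer

# Load the same three vocabularies the Space loads at startup.
tokenizer_dict = {}
for name, path in [("Hinglish_5k", "models/hinglish_5000.model"),
                   ("Hindi_5k", "models/hindi_5000.model"),
                   ("English_5k", "models/english_5000.model")]:
    tok = BPETokenizer()
    tok.load(path)
    tokenizer_dict[name] = tok

# Compare how each vocabulary segments the same mixed-language text.
text = "main kal office jaunga"  # made-up sample sentence
for name, tok in tokenizer_dict.items():
    ids = tok.encode_ordinary(text)
    pieces = [tok.decode([i]) for i in ids]
    print(name, len(ids), pieces)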