piyushgrover committed
Commit 6582b49 · verified · 1 Parent(s): 790e70f

Upload 4 files

Files changed (4)
  1. app.py +50 -0
  2. hindi_bpe_vocab.model +3 -0
  3. requirements.txt +3 -0
  4. tokenizer.py +241 -0
app.py ADDED
@@ -0,0 +1,50 @@
+ import gradio as gr
+ from tokenizer import HindiTokenizer
+
+ # Load the tokenizer
+ tokenizer = HindiTokenizer()
+ tokenizer.load_bpe_vocab("hindi_bpe_vocab.model")
+
+ def encode_text(hindi_text):
+     """
+     Encodes the given Hindi text into token IDs.
+     """
+     token_ids = tokenizer.encode(hindi_text)
+     return token_ids
+
+
+ def decode_tokens(token_ids):
+     """
+     Decodes the given token IDs into Hindi text.
+     """
+     # Ensure token_ids is a list of integers
+     try:
+         token_ids = list(map(int, token_ids.strip("[]").split(",")))
+     except Exception as e:
+         return f"Error in processing token IDs: {e}"
+
+     decoded_text = tokenizer.decode(token_ids)
+     return decoded_text
+
+
+ # Gradio interface
+ with gr.Blocks() as app:
+     gr.Markdown("## Hindi Tokenizer Encoder-Decoder")
+
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("### Encode Hindi Text to Token IDs")
+             hindi_text_input = gr.Textbox(label="Enter Hindi Text")
+             token_ids_output = gr.Textbox(label="Token IDs (Encoded)", interactive=False)
+             encode_button = gr.Button("Encode")
+
+         with gr.Column():
+             gr.Markdown("### Decode Token IDs to Hindi Text")
+             token_ids_input = gr.Textbox(label="Enter Token IDs (comma-separated or list)")
+             decoded_text_output = gr.Textbox(label="Decoded Hindi Text", interactive=False)
+             decode_button = gr.Button("Decode")
+
+     encode_button.click(encode_text, inputs=hindi_text_input, outputs=token_ids_output)
+     decode_button.click(decode_tokens, inputs=token_ids_input, outputs=decoded_text_output)
+
+ app.launch()
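
For a quick sanity check of the same encode/decode path outside the Gradio UI, a minimal sketch (assumes hindi_bpe_vocab.model is in the working directory; the sample sentence is only illustrative and the exact ids depend on the trained merges):

    from tokenizer import HindiTokenizer

    tok = HindiTokenizer()
    tok.load_bpe_vocab("hindi_bpe_vocab.model")

    ids = tok.encode("नमस्ते दुनिया")   # list of ints, i.e. what the "Encode" box would display
    print(ids)
    print(tok.decode(ids))             # round-trips back to the input text

The "Decode" textbox accepts the encoded list pasted back in with or without brackets, since decode_tokens strips "[]" before parsing the comma-separated ids.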
hindi_bpe_vocab.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:115bdb4f3416ad5d5fb1e4f9c3fe64eb97be62506d6e3fb51752ccc3d24de3bd
+ size 517
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ tqdm
+ gradio
+ regex
tokenizer.py ADDED
@@ -0,0 +1,241 @@
+ import regex as re
+ from tqdm import tqdm
+
+ VOCAB_SIZE = 5000 # the desired final vocabulary size
+
+ def get_stats(ids, counts=None):
+     """Count how often each consecutive pair of ids occurs; optionally accumulate into an existing counts dict."""
+     counts = {} if counts is None else counts
+     for pair in zip(ids, ids[1:]):
+         counts[pair] = counts.get(pair, 0) + 1
+     return counts
+
+
+ # ids: list of integers, pair: the pair of ints we are merging, idx: the new int we want to replace the pair with.
+ def merge(ids, pair, idx):
+     """
+     In the list of integers (ids), replace all consecutive occurrences
+     of pair with the new integer token idx.
+     Example: ids=[1, 2, 3, 1, 2], pair=(1, 2), idx=4 -> [4, 3, 4]
+     """
+     newids = []
+     i = 0
+     while i < len(ids):
+         if i < len(ids) - 1 and ids[i] == pair[0] and ids[i+1] == pair[1]:
+             newids.append(idx)
+             i += 2
+         else:
+             newids.append(ids[i])
+             i += 1
+     return newids
+
+
+ class HindiTokenizer():
+     def __init__(self):
+         self.pattern = r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{N}+| ?(?:[\u0904-\u0939\u093d-\u093d\u0950-\u0950\u0958-\u0961\u0970-\u097f\ua8f2-\ua8fe\U00011b00-\U00011b09\u1cd3-\u1cd3\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5-\u1cf6\u1cfa-\u1cfa][\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\ua8e0-\ua8f1\ua8ff-\ua8ff\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced-\u1ced\u1cf4-\u1cf4\u1cf7-\u1cf9]*)+| ?\p{L}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
+
+         self.merges = {}
+         self.vocab = {idx: bytes([idx]) for idx in range(256)}
+         self.special_tokens = {
+             '<|endoftext|>': VOCAB_SIZE
+         }
+         self.inverse_special_tokens = {v: k for k, v in self.special_tokens.items()}
+
+     def _build_vocab(self):
+         # vocab is simply and deterministically derived from merges
+         vocab = {idx: bytes([idx]) for idx in range(256)} # initial vocab is the 256 single-byte tokens
+         for (p0, p1), idx in self.merges.items(): # add each learned merge to the vocab
+             vocab[idx] = vocab[p0] + vocab[p1]
+         for special, idx in self.special_tokens.items():
+             vocab[idx] = special.encode("utf-8")
+         return vocab
+
+     def tokenize_hindi(self, text):
+         # Pre-tokenize the text into chunks (Hindi words, digits, English words, punctuation, whitespace); the commented-out pattern below is an unused alternative
+         '''pattern = re.compile(r"""
+             |[\u0900-\u097F](?![\u0964\u0965])+ # Match Hindi words (Devanagari script)
+             |[\u0966-\u096F]+ # Match Hindi digits (०-९)
+             |[a-zA-Z]+ # Match English words (Latin script)
+             |[0-9]+ # Match Latin digits (0-9)
+             |\s+ # Match whitespace (spaces, tabs, newlines)
+             |'[^\r\n\p{L}\p{N}]*\p{L}+ # Match apostrophes followed by letters
+             |\p{N}{1,3} # Match numbers (1 to 3 digits)
+             |[^\s\p{L}\p{N}]+ # Match non-letter, non-number special characters
+             |\s*[\r\n] # Match line breaks and leading spaces
+             |\s+(?!\S) # Match trailing whitespace
+             """, re.VERBOSE)'''
+
+         pattern = re.compile(self.pattern)
+         return pattern.findall(text)
+
+
+     def learn_bpe_vocab(self, text, num_merges=50):
+         tokenized_text = self.tokenize_hindi(text)
+         #print(tokenized_text)
+         tokens = [list(map(int, token.encode("utf-8"))) for token in tokenized_text]
+         input_len = 0
+         for chunk_ids in tokens:
+             # track the pre-merge token count for the compression ratio:
+             # the total length is the sum of the byte-token lengths of all chunks.
+             input_len += len(chunk_ids)
+
+         for i in tqdm(range(num_merges), desc="Merging pairs", unit="merge"):
+             stats = {}
+             for chunk_ids in tokens:
+                 stats = get_stats(chunk_ids, stats)
+
+             pair = max(stats, key=stats.get)
+             idx = 256 + i
+             tokens = [merge(chunk_ids, pair, idx) for chunk_ids in tokens]
+
+             self.merges[pair] = idx
+             self.vocab[idx] = self.vocab[pair[0]] + self.vocab[pair[1]]
+
+         output_len = 0
+         for chunk_ids in tokens:
+             output_len += len(chunk_ids)
+
+         print(f"input_len: {input_len}, output_len: {output_len}, compression ratio: {input_len / output_len:.2f}X")
+
+
+     def save_bpe_vocab(self, model_file):
+         with open(model_file, 'w') as f:
+             # write the version, pattern and merges; that's all that's needed
+             f.write("minbpe v1\n")
+             f.write(f"{self.pattern}\n")
+             # write the special tokens: first the number of them, then each one
+             f.write(f"{len(self.special_tokens)}\n")
+             for special, idx in self.special_tokens.items():
+                 f.write(f"{special} {idx}\n")
+             # the merges dict
+             for idx1, idx2 in self.merges:
+                 f.write(f"{idx1} {idx2}\n")
+
+
+     def load_bpe_vocab(self, filepath):
+         assert filepath.endswith(".model")
+         # read the model file
+         merges = {}
+         special_tokens = {}
+         idx = 256
+         with open(filepath, 'r', encoding="utf-8") as f:
+             # read the version
+             version = f.readline().strip()
+             assert version == "minbpe v1"
+             # read the pattern
+             self.pattern = f.readline().strip()
+             # read the special tokens
+             num_special = int(f.readline().strip())
+             for _ in range(num_special):
+                 special, special_idx = f.readline().strip().split()
+                 special_tokens[special] = int(special_idx)
+             # read the merges; token ids are assigned in file order starting at 256
+             for line in f:
+                 idx1, idx2 = map(int, line.split())
+                 merges[(idx1, idx2)] = idx
+                 idx += 1
+         self.merges = merges
+         self.special_tokens = special_tokens
+         self.inverse_special_tokens = {v: k for k, v in special_tokens.items()}
+         self.vocab = self._build_vocab()
+
+     def register_special_tokens(self, special_tokens):
+         # special_tokens is a dictionary of str -> int
+         # example: {"<|endoftext|>": 100257}
+         self.special_tokens = special_tokens
+         self.inverse_special_tokens = {v: k for k, v in special_tokens.items()}
+
+     def decode(self, ids):
+         # given ids (list of integers), return a Python string
+         part_bytes = []
+         # get the bytes for each token id from the vocab
+         for idx in ids:
+             if idx in self.vocab:
+                 part_bytes.append(self.vocab[idx])
+             elif idx in self.inverse_special_tokens:
+                 part_bytes.append(self.inverse_special_tokens[idx].encode("utf-8"))
+             else:
+                 raise ValueError(f"invalid token id: {idx}")
+         text_bytes = b"".join(part_bytes)
+         text = text_bytes.decode("utf-8", errors="replace")
+         return text
+
+     def _encode_chunk(self, text_bytes):
+         # return the token ids
+         # first, convert all bytes to integers in range 0..255
+         ids = list(text_bytes)
+         while len(ids) >= 2:
+             # find the pair with the lowest merge index
+             stats = get_stats(ids)
+             pair = min(stats, key=lambda p: self.merges.get(p, float("inf")))
+             # subtle: if there are no more merges available, the key will
+             # result in an inf for every single pair, and the min will be
+             # just the first pair in the list, arbitrarily
+             # we can detect this terminating case by a membership check
+             if pair not in self.merges:
+                 break # nothing else can be merged anymore
+             # otherwise let's merge the best pair (lowest merge index)
+             idx = self.merges[pair]
+             ids = merge(ids, pair, idx)
+         return ids
+
+     def encode_ordinary(self, text):
+         """Encoding that ignores any special tokens."""
+         # split text into chunks of text by categories defined in regex pattern
+         text_chunks = self.tokenize_hindi(text)
+         # all chunks of text are encoded separately, then results are joined
+         ids = []
+         for chunk in text_chunks:
+             chunk_bytes = chunk.encode("utf-8") # raw bytes
+             chunk_ids = self._encode_chunk(chunk_bytes)
+             ids.extend(chunk_ids)
+         return ids
+
+     def encode(self, text, allowed_special="none_raise"):
+         """
+         Unlike encode_ordinary, this function handles special tokens.
+         allowed_special: can be "all"|"none"|"none_raise" or a custom set of special tokens;
+         if none_raise, then an error is raised if any special token is encountered in text.
+         This is the default tiktoken behavior right now as well;
+         any other behavior is either annoying, or a major footgun.
+         """
+         # decode the user desire w.r.t. handling of special tokens
+         special = None
+         if allowed_special == "all":
+             special = self.special_tokens
+         elif allowed_special == "none":
+             special = {}
+         elif allowed_special == "none_raise":
+             special = {}
+             assert all(token not in text for token in self.special_tokens)
+         elif isinstance(allowed_special, set):
+             special = {k: v for k, v in self.special_tokens.items() if k in allowed_special}
+         else:
+             raise ValueError(f"allowed_special={allowed_special} not understood")
+         if not special:
+             # shortcut: if no special tokens, just use the ordinary encoding
+             return self.encode_ordinary(text)
+         # otherwise, we have to be careful with potential special tokens in text
+         # we handle special tokens by splitting the text
+         # based on the occurrence of any exact match with any of the special tokens
+         # we can use re.split for this. note that surrounding the pattern with ()
+         # makes it into a capturing group, so the special tokens will be included
+         special_pattern = "(" + "|".join(re.escape(k) for k in special) + ")"
+         special_chunks = re.split(special_pattern, text)
+         # now all the special characters are separated from the rest of the text
+         # all chunks of text are encoded separately, then results are joined
+         ids = []
+         for part in special_chunks:
+             if part in special:
+                 # this is a special token, encode it separately as a special case
+                 ids.append(special[part])
+             else:
+                 # this is an ordinary sequence, encode it normally
+                 ids.extend(self.encode_ordinary(part))
+         return ids
+
+
+
+ #print(len(texts))
+ tokenizer = HindiTokenizer()
+ tokenizer.load_bpe_vocab("hindi_bpe_vocab.model")
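
The commit ships a pre-trained hindi_bpe_vocab.model and only exercises the loading path above. For reference, regenerating a vocabulary with the class's own training methods might look roughly like the following sketch; the corpus path is a placeholder (the training data is not part of this commit), and num_merges=50 mirrors the method's default, whereas filling a full VOCAB_SIZE vocabulary would need VOCAB_SIZE - 256 merges:

    from tokenizer import HindiTokenizer

    # hypothetical corpus file, not included in this commit
    with open("hindi_corpus.txt", "r", encoding="utf-8") as f:
        text = f.read()

    tok = HindiTokenizer()
    tok.learn_bpe_vocab(text, num_merges=50)      # learns merges for ids 256..305 and prints the compression ratio
    tok.save_bpe_vocab("hindi_bpe_vocab.model")   # writes the version header, pattern, special tokens, and merge pairs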