AkashDataScience committed
Commit 41dfb3a · 1 Parent(s): d716035

First commit

app.py ADDED
@@ -0,0 +1,31 @@
+ import torch
+ import gradio as gr
+ from language_bpe import BPETokenizer
+
+ tokenizer = BPETokenizer()
+ tokenizer.load('models/english_5000.model')
+
+ def inference(input_text):
+     tokens = tokenizer.encode_ordinary(input_text)
+
+     return tokens
+
+ title = "A bilingual tokenizer built using OPUS and Wikipedia data"
+ description = "A simple Gradio interface to see the tokenization of Hindi and English (Hinglish) text"
+ examples = [["He walked into the basement with the horror movie from the night before playing in his head."],
+             ["Henry couldn't decide if he was an auto mechanic or a priest."],
+             ["Poison ivy grew through the fence they said was impenetrable."],
+             ]
+ demo = gr.Interface(
+     inference,
+     inputs=[
+         gr.Textbox(label="Enter any sentence in Hindi, English, or both languages", type="text"),
+     ],
+     outputs=[
+         gr.Textbox(label="Output", type="text")
+     ],
+     title=title,
+     description=description,
+     examples=examples,
+ )
+ demo.launch()
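
For context, the same tokenization can be exercised without the Gradio UI. A minimal sketch, not part of the commit, assuming the repository root as the working directory and the LFS model file pulled; the exact token ids depend on the loaded model:

    from language_bpe import BPETokenizer

    tokenizer = BPETokenizer()
    tokenizer.load('models/english_5000.model')          # same model the app loads
    ids = tokenizer.encode_ordinary("Poison ivy grew through the fence.")  # any Hindi/English sentence
    print(ids)                                           # list of integer token ids, as shown in the app's Output box
    print(tokenizer.decode(ids))                         # decode() maps ids back to a string (see bpe_tokenizer.py)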
language_bpe/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .base import Tokenizer
+ from .bpe_tokenizer import BPETokenizer
language_bpe/base.py ADDED
@@ -0,0 +1,141 @@
+ import unicodedata
+
+ def get_stats(ids, counts=None):
+     counts = {} if counts is None else counts
+     for pair in zip(ids, ids[1:]):
+         counts[pair] = counts.get(pair, 0) + 1
+     return counts
+
+ def merge(ids, pair, idx):
+     newids = []
+     i = 0
+     while i < len(ids):
+         if ids[i] == pair[0] and i < len(ids) - 1 and ids[i+1] == pair[1]:
+             newids.append(idx)
+             i += 2
+         else:
+             newids.append(ids[i])
+             i += 1
+     return newids
+
+ def merge_hindi(ids, pair, idx):
+     newids = []
+     i = 0
+     while i < len(ids):
+         # if not within two positions of the end AND the three-byte sequence matches, replace it
+         if ids[i] == pair[0] and i < len(ids) - 2 and ids[i+1] == pair[1] and ids[i+2] == pair[2]:
+             newids.append(idx)
+             i += 3
+         else:
+             newids.append(ids[i])
+             i += 1
+     return newids
+
+ def replace_control_characters(s):
+     chars = []
+     for ch in s:
+         if unicodedata.category(ch)[0] != "C":
+             chars.append(ch)
+         else:
+             chars.append(f"\\u{ord(ch):04x}")
+     return "".join(chars)
+
+ def render_token(t):
+     s = t.decode('utf-8', errors='replace')
+     s = replace_control_characters(s)
+     return s
+
+ class Tokenizer:
+     def __init__(self):
+         self.merges = {}
+         self.pattern = ""
+         self.special_tokens = {}
+         self.vocab = self._build_vocab()
+
+     def build(self, text, vocab_size, verbose=False):
+         raise NotImplementedError
+
+     def encode(self, text):
+         raise NotImplementedError
+
+     def decode(self, ids):
+         raise NotImplementedError
+
+     def _build_vocab(self):
+         # vocab is simply and deterministically derived from merges
+         vocab = {idx: bytes([idx]) for idx in range(256)}
+         for (p0, p1), idx in self.merges.items():
+             vocab[idx] = vocab[p0] + vocab[p1]
+         for special, idx in self.special_tokens.items():
+             vocab[idx] = special.encode("utf-8")
+         return vocab
+
+     def save(self, file_prefix):
+         """
+         Saves two files: file_prefix.vocab and file_prefix.model
+         This is inspired by (but not equivalent to) sentencepiece's model saving:
+         - the model file is the critical one, intended for load()
+         - the vocab file is just a pretty-printed version for human inspection only
+         """
+         # write the model: to be used in load() later
+         model_file = file_prefix + ".model"
+         with open(model_file, 'w') as f:
+             # write the version, pattern and merges, that's all that's needed
+             f.write("minbpe v1\n")
+             f.write(f"{self.pattern}\n")
+             # write the special tokens, first the number of them, then each one
+             f.write(f"{len(self.special_tokens)}\n")
+             for special, idx in self.special_tokens.items():
+                 f.write(f"{special} {idx}\n")
+             # the merges dict
+             for idx1, idx2 in self.merges:
+                 f.write(f"{idx1} {idx2}\n")
+         # write the vocab: for the human to look at
+         vocab_file = file_prefix + ".vocab"
+         inverted_merges = {idx: pair for pair, idx in self.merges.items()}
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             for idx, token in self.vocab.items():
+                 # note: many tokens may be partial utf-8 sequences
+                 # and cannot be decoded into valid strings. Here we're using
+                 # errors='replace' to replace them with the replacement char �.
+                 # this also means that we couldn't possibly use .vocab in load()
+                 # because decoding in this way is a lossy operation!
+                 s = render_token(token)
+                 # find the children of this token, if any
+                 if idx in inverted_merges:
+                     # if this token has children, render it nicely as a merge
+                     idx0, idx1 = inverted_merges[idx]
+                     s0 = render_token(self.vocab[idx0])
+                     s1 = render_token(self.vocab[idx1])
+                     f.write(f"[{s0}][{s1}] -> [{s}] {idx}\n")
+                 else:
+                     # otherwise this is a leaf token, just print it
+                     # (this should just be the first 256 tokens, the bytes)
+                     f.write(f"[{s}] {idx}\n")
+
+     def load(self, model_file):
+         """Inverse of save() but only for the model file"""
+         assert model_file.endswith(".model")
+         # read the model file
+         merges = {}
+         special_tokens = {}
+         idx = 256
+         with open(model_file, 'r', encoding="utf-8") as f:
+             # read the version
+             version = f.readline().strip()
+             assert version == "minbpe v1"
+             # read the pattern
+             self.pattern = f.readline().strip()
+             # read the special tokens
+             num_special = int(f.readline().strip())
+             for _ in range(num_special):
+                 special, special_idx = f.readline().strip().split()
+                 special_tokens[special] = int(special_idx)
+             # read the merges
+             for line in f:
+                 idx1, idx2 = map(int, line.split())
+                 merges[(idx1, idx2)] = idx
+                 idx += 1
+         self.merges = merges
+         self.special_tokens = special_tokens
+         self.vocab = self._build_vocab()
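
To make the save()/load() pair in base.py concrete, a minimal sketch, not part of the commit; the corpus path and vocab size are placeholders, and build() here is the BPETokenizer implementation from the next file:

    from language_bpe import BPETokenizer

    tok = BPETokenizer()
    tok.build(open('data/english.txt', encoding='utf-8').read(), vocab_size=5000)  # hypothetical corpus path
    tok.save('models/english_5000')          # writes english_5000.model (for load) and english_5000.vocab (human-readable)

    tok2 = BPETokenizer()
    tok2.load('models/english_5000.model')   # restores pattern, special tokens, and merges

The .model file is plain text: a "minbpe v1" version line, the split pattern, the special-token count and entries, then one "idx1 idx2" pair per merge, in merge order.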
language_bpe/bpe_tokenizer.py ADDED
@@ -0,0 +1,191 @@
+ import sys
+ import regex as re
+ from tqdm import tqdm
+ from .base import Tokenizer, get_stats, merge, merge_hindi
+
+ GPT4_SPLIT_PATTERN = r"""'(?i:[sdmt]|ll|ve|re)|[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+"""
+
+ class BPETokenizer(Tokenizer):
+
+     def __init__(self, pattern=None, word_pattern=None):
+         super().__init__()
+         self.pattern = GPT4_SPLIT_PATTERN if pattern is None else pattern
+         self.compiled_pattern = re.compile(self.pattern)
+         self.word_pattern = None
+         self.compiled_pattern_word = None
+         if word_pattern:
+             self.word_pattern = word_pattern
+             self.compiled_pattern_word = re.compile(self.word_pattern)
+         self.special_tokens = {}
+         self.inverse_special_tokens = {}
+
+     def build(self, text, vocab_size, verbose=False):
+
+         text_chunks = re.findall(self.compiled_pattern, text)
+
+         if self.compiled_pattern_word:
+             print("Splitting Hindi words")
+             text_chunks_words = []
+             for chunk in tqdm(text_chunks):
+                 element_chunks = re.findall(self.compiled_pattern_word, chunk)
+                 if element_chunks == []:
+                     text_chunks_words.append(chunk)
+                 else:
+                     text_chunks_words.extend(element_chunks[0])
+             text_chunks = text_chunks_words
+
+         # input text preprocessing
+         ids = [list(ch.encode("utf-8")) for ch in text_chunks]
+
+         merges = {}
+         vocab = {idx: bytes([idx]) for idx in range(256)}
+         vocab.update({idx: chr(value).encode('utf-8') for idx, value in zip(range(256, 384), range(2304, 2432))})
+
+         print("Merging Hindi characters into single tokens")
+         for index in tqdm(range(256, 384)):
+             pair = list(vocab[index])
+             ids = [merge_hindi(chunk_ids, pair, index) for chunk_ids in ids]
+
+         num_merges = vocab_size - 384
+
+         original_length = len([x for xs in ids for x in xs])
+
+         print("Building BPE")
+         for i in tqdm(range(num_merges), file=sys.stdout):
+             # count the number of times every consecutive pair appears
+             stats = {}
+             for chunk_ids in ids:
+                 # passing in stats will update it in place, adding up counts
+                 get_stats(chunk_ids, stats)
+             # find the pair with the highest count
+             pair = max(stats, key=stats.get)
+             # mint a new token: assign it the next available id
+             idx = 384 + i
+             # replace all occurrences of pair in ids with idx
+             ids = [merge(chunk_ids, pair, idx) for chunk_ids in ids]
+             # save the merge
+             merges[pair] = idx
+             vocab[idx] = vocab[pair[0]] + vocab[pair[1]]
+             # prints
+             if verbose:
+                 try:
+                     tqdm.write(f"merge {i+1}/{num_merges}: {pair} -> {idx} ({vocab[idx].decode('utf-8')}) had {stats[pair]} occurrences")
+                 except Exception:
+                     tqdm.write(f"merge {i+1}/{num_merges}: {pair} -> {idx} ({vocab[idx]}) had {stats[pair]} occurrences")
+
+         length_after_merging = len([x for xs in ids for x in xs])
+
+         print(f'Compression ratio: {original_length/length_after_merging}')
+
+         # save class variables
+         self.merges = merges # used in encode()
+         self.vocab = vocab # used in decode()
+
+     def register_special_tokens(self, special_tokens):
+         # special_tokens is a dictionary of str -> int
+         # example: {"<|endoftext|>": 100257}
+         self.special_tokens = special_tokens
+         self.inverse_special_tokens = {v: k for k, v in special_tokens.items()}
+
+     def decode(self, ids):
+         # given ids (list of integers), return Python string
+         part_bytes = []
+         for idx in ids:
+             if idx in self.vocab:
+                 part_bytes.append(self.vocab[idx])
+             elif idx in self.inverse_special_tokens:
+                 part_bytes.append(self.inverse_special_tokens[idx].encode("utf-8"))
+             else:
+                 raise ValueError(f"invalid token id: {idx}")
+         text_bytes = b"".join(part_bytes)
+         text = text_bytes.decode("utf-8", errors="replace")
+         return text
+
+     def _encode_chunk(self, ids):
+         # given the integer ids of one chunk, repeatedly apply the learned merges
+         # (ids is already a list of integers; the caller converted the raw bytes)
+         while len(ids) >= 2:
+             # find the pair with the lowest merge index
+             stats = get_stats(ids)
+             pair = min(stats, key=lambda p: self.merges.get(p, float("inf")))
+             # subtle: if there are no more merges available, the key will
+             # result in an inf for every single pair, and the min will be
+             # just the first pair in the list, arbitrarily
+             # we can detect this terminating case by a membership check
+             if pair not in self.merges:
+                 break # nothing else can be merged anymore
+             # otherwise let's merge the best pair (lowest merge index)
+             idx = self.merges[pair]
+             ids = merge(ids, pair, idx)
+         return ids
+
+     def encode_ordinary(self, text):
+         """Encoding that ignores any special tokens."""
+         # split text into chunks of text by categories defined in regex pattern
+         text_chunks = re.findall(self.compiled_pattern, text)
+         if self.compiled_pattern_word:
+             print("Splitting Hindi words")
+             text_chunks_words = []
+             for chunk in tqdm(text_chunks):
+                 element_chunks = re.findall(self.compiled_pattern_word, chunk)
+                 if element_chunks == []:
+                     text_chunks_words.append(chunk)
+                 else:
+                     text_chunks_words.extend(element_chunks[0])
+             text_chunks = text_chunks_words
+         # all chunks of text are encoded separately, then results are joined
+         ids_list = []
+         for chunk in text_chunks:
+             chunk_bytes = chunk.encode("utf-8") # raw bytes
+             ids = list(chunk_bytes)
+             vocab = {idx: bytes([idx]) for idx in range(256)}
+             vocab.update({idx: chr(value).encode('utf-8') for idx, value in zip(range(256, 384), range(2304, 2432))})
+             for index in range(256, 384):
+                 pair = list(vocab[index])
+                 ids = merge_hindi(ids, pair, index)  # ids is the flat list of byte values for this chunk
+             chunk_ids = self._encode_chunk(ids)
+             ids_list.extend(chunk_ids)
+         return ids_list
+
+     def encode(self, text, allowed_special="none_raise"):
+         """
+         Unlike encode_ordinary, this function handles special tokens.
+         allowed_special: can be "all"|"none"|"none_raise" or a custom set of special tokens
+         if none_raise, then an error is raised if any special token is encountered in text
+         this is the default tiktoken behavior right now as well
+         any other behavior is either annoying, or a major footgun
+         """
+         # decode the user desire w.r.t. handling of special tokens
+         special = None
+         if allowed_special == "all":
+             special = self.special_tokens
+         elif allowed_special == "none":
+             special = {}
+         elif allowed_special == "none_raise":
+             special = {}
+             assert all(token not in text for token in self.special_tokens)
+         elif isinstance(allowed_special, set):
+             special = {k: v for k, v in self.special_tokens.items() if k in allowed_special}
+         else:
+             raise ValueError(f"allowed_special={allowed_special} not understood")
+         if not special:
+             # shortcut: if no special tokens, just use the ordinary encoding
+             return self.encode_ordinary(text)
+         # otherwise, we have to be careful with potential special tokens in text
+         # we handle special tokens by splitting the text
+         # based on the occurrence of any exact match with any of the special tokens
+         # we can use re.split for this. note that surrounding the pattern with ()
+         # makes it into a capturing group, so the special tokens will be included
+         special_pattern = "(" + "|".join(re.escape(k) for k in special) + ")"
+         special_chunks = re.split(special_pattern, text)
+         # now all the special characters are separated from the rest of the text
+         # all chunks of text are encoded separately, then results are joined
+         ids = []
+         for part in special_chunks:
+             if part in special:
+                 # this is a special token, encode it separately as a special case
+                 ids.append(special[part])
+             else:
+                 # this is an ordinary sequence, encode it normally
+                 ids.extend(self.encode_ordinary(part))
+         return ids
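
To illustrate the allowed_special handling in encode(), a minimal sketch, not part of the commit; the special token string and its id are chosen for illustration only:

    from language_bpe import BPETokenizer

    tok = BPETokenizer()
    tok.load('models/english_5000.model')
    tok.register_special_tokens({"<|endoftext|>": 5000})           # hypothetical id outside the regular vocab
    ids = tok.encode("first doc<|endoftext|>second doc", allowed_special="all")
    # "<|endoftext|>" is emitted as the single id 5000; the surrounding text goes through encode_ordinary()
    tok.encode("first doc<|endoftext|>second doc")                 # default "none_raise": the assert fires on the special token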
models/english_5000.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2b8dbed9bf76124c7660f13499936c2015504e9e29c1a4e4be3af3bbfbb51f0
+ size 38534
models/hindi_5000.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b844ea0a4156cc551b5bfdeeadb65c6ff77deb7cbf609518195f521cc6cac66
+ size 38844
models/hinglish_5000.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f53512686a6e268b23806738311e5589e604c923ed412ccf9cda7c8a041acb0c
+ size 38540