# token-explorer / app.py
import gradio as gr
from faiss import IndexFlatIP
import pandas as pd
import numpy as np
from transformers import AutoTokenizer
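
# Load the BERT tokenizer and the precomputed input (word-piece) embedding matrix
# saved alongside this app as bert_input_embeddings.npy.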
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased")
input_embeddings = np.load("bert_input_embeddings.npy")
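
# Exact inner-product nearest-neighbour index over all input embeddings.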
index = IndexFlatIP(input_embeddings.shape[-1])
index.add(input_embeddings)
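
# Reverse the vocabulary (token -> id) so returned ids can be mapped back to token strings.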
vocab = {v: k for k, v in tokenizer.vocab.items()}
lookup_table = pd.Series(vocab).sort_index()

def get_first_subword(word):
    """Return the vocabulary id of `word`, or the id of its first sub-word if it is out of vocabulary."""
    try:
        return tokenizer.vocab[word]
    except KeyError:
        return tokenizer(word, add_special_tokens=False)['input_ids'][0]

def search(token_to_lookup, num_neighbors=50):
    """Find the nearest vocabulary tokens to `token_to_lookup` in the input embedding space."""
    i = get_first_subword(token_to_lookup)
    _, I = index.search(input_embeddings[i:i + 1], num_neighbors)
    hits = lookup_table.take(I[0])
    # Drop the first hit (the query token itself) and split whole words from "##" sub-word pieces.
    results = hits.values[1:]
    return [r for r in results if "##" not in r], [r for r in results if "##" in r]
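
# Gradio UI: a single token textbox in, two textboxes out (whole-word neighbours and sub-word neighbours).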
iface = gr.Interface(
    fn=search,
    # inputs=[gr.Textbox(lines=1, label="Vocabulary Token", placeholder="Enter token..."), gr.Number(value=50, label="number of neighbors")],
    inputs=gr.Textbox(lines=1, label="Vocabulary Token", placeholder="Enter token..."),
    outputs=[gr.Textbox(label="Nearest tokens"), gr.Textbox(label="Nearest subwords")],
    examples=[
        ["##logy"],
        ["##ness"],
        ["##nity"],
        ["responded"],
        ["queen"],
        ["king"],
        ["hospital"],
        ["disease"],
        ["grammar"],
        ["philosophy"],
        ["aristotle"],
        ["##ting"],
        ["woman"],
        ["man"],
    ],
)
iface.launch(enable_queue=True, debug=True, show_error=True)