# llama_tokenizer / app.py
import gradio as gr
from transformers import LlamaTokenizer
# Load the LLaMA tokenizer from the local "llama_tokenizer" directory
tokenizer = LlamaTokenizer.from_pretrained("llama_tokenizer")

def tokenize(prompt):
    # Encode without special tokens so only the prompt's own tokens are counted
    tokens = tokenizer.encode(prompt, add_special_tokens=False)
    num_tokens = len(tokens)
    # Return a string rather than a tuple so the output Textbox displays cleanly
    return f"Number of tokens: {num_tokens}"

with gr.Blocks() as demo:
    gr.Markdown("Token Counter for LLaMA")
    with gr.Row():
        inp = gr.Textbox(label="Prompt", placeholder="Enter prompt")
        out = gr.Textbox(label="Token count")
    btn = gr.Button("Run")
    btn.click(fn=tokenize, inputs=inp, outputs=out)

demo.launch()