import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model once
model_name = "HuggingFaceTB/SmolLM-1.7B"
model = AutoModelForCausalLM.from_pretrained(model_name)
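# Note (optional): for faster inference on a GPU, from_pretrained also accepts
# torch_dtype and device_map keyword arguments (e.g. torch_dtype=torch.float16,
# device_map="auto", the latter requiring the accelerate package); the defaults
# here load the model in full precision on CPU.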

# Define a list of five different tokenizers to use
tokenizer_names = [
    "HuggingFaceTB/SmolLM-1.7B",  # Model's default tokenizer
    "gpt2",                       # GPT-2 tokenizer
    "distilbert-base-uncased",    # DistilBERT tokenizer
    "bert-base-uncased",          # BERT tokenizer
    "roberta-base"                # RoBERTa tokenizer
]

# Load all the tokenizers
tokenizers = {name: AutoTokenizer.from_pretrained(name) for name in tokenizer_names}
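# Each tokenizer has its own vocabulary, so the same prompt maps to different
# token ids under each one; for every tokenizer other than the model's default,
# those ids are mismatched with the model's training vocabulary, and comparing
# the generations surfaces how sensitive the output is to that mismatch.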

def generate_responses(prompt, selected_tokenizers):
    responses = {}
    for name in selected_tokenizers:
        tokenizer = tokenizers.get(name)
        if tokenizer:
            try:
                inputs = tokenizer(prompt, return_tensors="pt")
                # BERT-style tokenizers also emit token_type_ids, which a
                # causal LM's generate() rejects as an unexpected argument.
                inputs.pop("token_type_ids", None)
                # Cap generation length; generate() otherwise defaults to
                # max_length=20 and emits a warning.
                outputs = model.generate(**inputs, max_new_tokens=50)
                response = tokenizer.decode(outputs[0], skip_special_tokens=True)
                responses[name] = response
            except Exception as e:
                responses[name] = f"Error: {str(e)}"
    return responses
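
# Quick smoke test without the UI (hypothetical prompt; assumes the model and
# tokenizers above downloaded successfully):
#   print(generate_responses("The quick brown fox", ["gpt2"]))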

# Gradio interface setup with checkboxes for tokenizers
interface = gr.Interface(
    fn=generate_responses,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
        gr.CheckboxGroup(choices=tokenizer_names, label="Select tokenizers to use")
    ],
    outputs=gr.JSON(),
    title="Tokenizer Comparison",
    description="Compare model outputs with different tokenizers"
)

# Launch the Gradio interface
interface.launch()
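# launch() starts a local server (http://127.0.0.1:7860 by default);
# pass share=True for a temporary public link.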