from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr

# Load the tokenizer and model from Hugging Face
tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
model = GPT2LMHeadModel.from_pretrained("distilgpt2")

# GPT-2 has no padding token, so reuse the end-of-sequence token for padding
tokenizer.pad_token = tokenizer.eos_token

def chat(message, history):
    # Rebuild the conversation as a plain-text prompt;
    # history arrives as a list of (user_message, bot_reply) pairs
    full_prompt = ""
    for user, bot in history:
        full_prompt += f"User: {user}\nBot: {bot}\n"
    full_prompt += f"User: {message}\nBot:"

    # Tokenize the prompt and generate a continuation
    inputs = tokenizer(full_prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens, not the echoed prompt
    reply = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

    # Stop where the model starts inventing the next "User:" turn
    reply = reply.split("User:")[0].strip()

    return reply

# Set up the Gradio interface
gr.ChatInterface(fn=chat, title="Simple Chatbot with DistilGPT-2").launch()
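
# A minimal sketch of how this might be run locally, assuming the file is
# saved as app.py and the required packages are not yet installed:
#
#   pip install transformers torch gradio
#   python app.py
#
# Gradio then serves the chat UI on a local URL (by default http://127.0.0.1:7860).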