|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
import torch

# Falcon-RW-1B: a small causal LM that fits on a single GPU or, slowly, on CPU.
model_id = "tiiuae/falcon-rw-1b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Put the pipeline on the GPU if one is available, otherwise on CPU (-1).
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)
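# Optional smoke test before wiring up the UI:
#   print(pipe("User: Hi!\nBot:", max_new_tokens=20)[0]["generated_text"])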
|
def chat(user_input, history):
    # ChatInterface passes history as a list of (user, bot) message pairs
    # (Gradio's tuple chat format); rebuild it as a plain-text prompt.
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nBot: {bot}\n"
    prompt += f"User: {user_input}\nBot:"

    # pad_token_id suppresses a generation warning, since Falcon-RW's
    # tokenizer defines no pad token of its own.
    response = pipe(
        prompt,
        max_new_tokens=128,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )[0]["generated_text"]

    # generated_text echoes the prompt, so keep only the text after the
    # last "Bot:" and cut off any extra "User:" turn the model invents.
    reply = response.split("Bot:")[-1].split("User:")[0].strip()

    # Return just the new reply; ChatInterface appends it to the history.
    return reply


gr.ChatInterface(chat, chatbot=gr.Chatbot(), title="Lightweight Chatbot").launch()
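# To run (assuming this file is saved as app.py):
#   python app.py
# Gradio serves the chat UI locally, at http://127.0.0.1:7860 by default.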