import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the Falcon-RW-1B model and tokenizer once at startup and
# wrap them in a text-generation pipeline.
model_id = "tiiuae/falcon-rw-1b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def chat_with_user(message, history):
    # gr.ChatInterface passes the user message and the running chat history,
    # and expects only the assistant's reply string in return.
    result = generator(message, max_new_tokens=200, return_full_text=False)[0]["generated_text"]
    return result.strip()

gr.ChatInterface(chat_with_user, title="Expert Chat Assistant").launch()
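
For reference, the text-generation pipeline returns a list with one dict per generated sequence, which is why the app indexes [0]["generated_text"]. Below is a minimal sketch of that generation step in isolation (same model ID as above; the prompt is only an example):

from transformers import pipeline

generator = pipeline("text-generation", model="tiiuae/falcon-rw-1b")
# Output looks like [{"generated_text": "..."}]; return_full_text=False drops the echoed prompt.
out = generator("Explain what a Gradio app does.", max_new_tokens=50, return_full_text=False)
print(out[0]["generated_text"].strip())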