bard-chatbot / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# Load the BART tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-base")
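# generate_response: encode the user's text with the tokenizer, run BART, and decode the reply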
def generate_response(user_input):
    inputs = tokenizer(user_input, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(**inputs)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
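# Note: model.generate runs with its default settings above; a possible tweak (an
# assumption, not part of the original app) would be to pass generation options
# inside generate_response, e.g.:
#   outputs = model.generate(**inputs, max_new_tokens=128, num_beams=4)

# Build the Gradio UI: one text box for the user's input, one for the model's response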
input_textbox = gr.Textbox(lines=10, label="Enter your text here")
output_textbox = gr.Textbox(label="Chatbot Response")
chatbot_interface = gr.Interface(
    fn=generate_response,
    inputs=input_textbox,
    outputs=output_textbox,
    title="Hugging Face BART Chatbot",
    description="This chatbot uses the Hugging Face BART model to generate responses based on user input.",
)
chatbot_interface.launch()
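# Usage note (an assumption, not part of the original file): once the app is running,
# it can also be queried programmatically with the gradio_client package, e.g.:
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("Hello!", api_name="/predict"))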