# work-qa / app.py
# sundea's picture
# Update app.py
# fd38abb
# raw
# history blame
# 749 Bytes
from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline
import gradio as gr
# Hugging Face Hub repo holding the fine-tuned extractive-QA checkpoint.
MODEL_ID = 'sundea/work-qa'

# Load the model and its tokenizer once at import time, then wrap them in a
# transformers question-answering pipeline used by the request handler below.
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
def get_out(text1, text2):
    """Answer a question by extracting a span from the given context.

    Args:
        text1: The question to answer.
        text2: The context passage the answer should be extracted from.

    Returns:
        The extracted answer string (the ``'answer'`` field of the
        pipeline's result dict).
    """
    qa_input = {'question': text1, 'context': text2}
    # For a single input the question-answering pipeline returns a dict
    # containing 'answer', 'score', 'start' and 'end'.
    result = QA(qa_input)
    return result['answer']
# Build the Gradio UI: two text inputs (question + context), a button that
# runs the QA pipeline, and a textbox showing the extracted answer.
# NOTE(review): the original file's indentation was lost in this copy; the
# layout below (only question/button inside the Row) is a reconstruction —
# confirm against the deployed Space.
with gr.Blocks() as demo:
    with gr.Row():
        question = gr.Textbox(label='question')
        greet_btn = gr.Button('compute')
    context=gr.Textbox(label='context')
    res=gr.Textbox(label='result')
    # Wire the button: on click, pass (question, context) to get_out and
    # display its return value in the result textbox.
    greet_btn.click(fn=get_out,inputs=[question,context],outputs=res)
demo.launch()