import gradio as gr
from simpletransformers.question_answering import QuestionAnsweringModel

# The predict() call below follows simpletransformers' QuestionAnsweringModel API, so
# that class is assumed here ("bert" is a placeholder model_type). The fine-tuned
# weights are loaded directly from the Hub repo "juanpasanper/tigo_question_answer",
# assuming it holds a full transformers checkpoint, instead of combining a local
# "este_si_me_sirvio.bin" with torch.load of a separate state dict.
model = QuestionAnsweringModel("bert", "juanpasanper/tigo_question_answer", use_cuda=False)
def question_answer(context, question):
    # simpletransformers expects SQuAD-style input: a list of {context, qas} dicts
    predictions, raw_outputs = model.predict(
        [{"context": context, "qas": [{"question": question, "id": "0"}]}]
    )
    # Return the top-ranked answer for the single question
    return predictions[0]["answer"][0]
iface = gr.Interface(
    fn=question_answer,
    inputs=["text", "text"],
    outputs=["text"],
    allow_flagging="manual",
    flagging_options=["correcto", "incorrecto"],
    flagging_dir="flagged",
    enable_queue=True,
)
iface.launch()