Spaces: Runtime error
Commit · 87736db
Parent(s): 9122642
Update app.py
app.py CHANGED
@@ -0,0 +1,69 @@
import gradio as gr
import pysbd
from sentence_transformers import CrossEncoder
from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline

# Generative reader: a T5 checkpoint fine-tuned for question answering.
model_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelWithLMHead.from_pretrained(model_name)

# Alternative generative reader, kept for reference:
#text2text_generator = pipeline("text2text-generation", model="gpt2")

sentence_segmenter = pysbd.Segmenter(language='en', clean=False)
# Cross-encoder re-ranker that scores (question, paragraph) pairs.
passage_retrieval_model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
# Extractive QA model that locates the answer span inside a paragraph.
qa_model = pipeline("question-answering", 'a-ware/bart-squadv2')


def fetch_answers(question, clinical_note):
    # Treat each non-empty line of the note as a candidate paragraph.
    clinical_note_paragraphs = clinical_note.splitlines()
    query_paragraph_list = [(question, para) for para in clinical_note_paragraphs if len(para.strip()) > 0]

    # Score every (question, paragraph) pair and keep the five best paragraphs.
    scores = passage_retrieval_model.predict(query_paragraph_list)
    top_5_indices = scores.argsort()[-5:]
    top_5_query_paragraph_list = [query_paragraph_list[i] for i in top_5_indices]
    top_5_query_paragraph_list.reverse()  # highest-scoring paragraph first

    top_5_query_paragraph_answer_list = ""
    count = 1
    for query, passage in top_5_query_paragraph_list:
        passage_sentences = sentence_segmenter.segment(passage)
        # Extract the answer span, then collect the passage sentences that contain it as evidence.
        answer = qa_model(question=query, context=passage)['answer']
        evidence_sentence = ""
        for i in range(len(passage_sentences)):
            if answer.startswith('.') or answer.startswith(':'):
                answer = answer[1:].strip()
            if answer in passage_sentences[i]:
                evidence_sentence = evidence_sentence + " " + passage_sentences[i]

        # Re-ask the question against the evidence sentences with the generative T5 reader.
        model_input = f"question: {query} context: {evidence_sentence}"
        #output_answer = text2text_generator(model_input)[0]['generated_text']
        encoded_input = tokenizer([model_input],
                                  return_tensors='pt',
                                  max_length=512,
                                  truncation=True)

        output = model.generate(input_ids=encoded_input.input_ids,
                                attention_mask=encoded_input.attention_mask)
        output_answer = tokenizer.decode(output[0], skip_special_tokens=True)

        # Accumulate the answer and its supporting evidence as markdown.
        result_str = "# ANSWER " + str(count) + ": " + output_answer + "\n"
        result_str = result_str + "REFERENCE: " + evidence_sentence + "\n\n"
        top_5_query_paragraph_answer_list += result_str
        count += 1

    return top_5_query_paragraph_answer_list


demo = gr.Interface(
    fn=fetch_answers,
    # TODO: take input as real-time audio and use OpenAI Whisper for speech-to-text.
    # TODO: allow the clinical note to be uploaded as a file (plain text or doc/docx).
    inputs=[gr.Textbox(lines=2, label='Question', show_label=True),
            gr.Textbox(lines=10, label='Document Text', show_label=True)],
    outputs="markdown",
    examples='.',  # path to a directory of example inputs
    title='Document Question Answering System with Evidence from document'
)
demo.launch()
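Note: AutoModelWithLMHead is deprecated in recent transformers releases; AutoModelForSeq2SeqLM is the current equivalent for encoder-decoder models such as T5, and switching to it is one possible (unconfirmed) fix for the Space's "Runtime error" status. Below is a minimal sketch of the generative reading step using the newer class, assuming a recent transformers version; the question and context strings are made up for illustration.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same checkpoint as app.py; AutoModelForSeq2SeqLM replaces the deprecated AutoModelWithLMHead.
model_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Hypothetical inputs, for illustration only.
question = "What medication was the patient given?"
context = "The patient was given 500 mg of amoxicillin three times daily."

encoded = tokenizer([f"question: {question} context: {context}"],
                    return_tensors='pt', max_length=512, truncation=True)
output = model.generate(input_ids=encoded.input_ids,
                        attention_mask=encoded.attention_mask)
print(tokenizer.decode(output[0], skip_special_tokens=True))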