Update app.py
app.py CHANGED
@@ -108,7 +108,7 @@ evidence_text=st.text_area("Enter your evidence:")
 # form_evidence.text_input(label='Enter your evidence')
 # evidence_text = form_evidence.form_submit_button(label='Submit')
 
-if evidence_text:
+# if evidence_text:
 st.caption(':green[Kindly hold on for a few minutes while the QA pairs are being generated]')
 #st.caption(':blue[At times, you may encounter null/none outputs, which could be a result of a delay in loading the models through the API. If you experience this problem, kindly try again after a few minutes.]')
 
@@ -329,7 +329,7 @@ def claim(text):
     data=df[["claim","who","what","why","when","where"]].copy()
     return data
 #-------------------------------------------------------------------------
-@st.cache
+# @st.cache
 def split_ws(input_list, delimiter="<sep>"):
     output_list = []
     for item in input_list:
@@ -341,14 +341,14 @@ def split_ws(input_list, delimiter="<sep>"):
     return output_list
 
 #--------------------------------------------------------------------------
-@st.cache
+# @st.cache
 def calc_rouge_l_score(list_of_evidence, list_of_ans):
     scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)
     scores = scorer.score(' '.join(list_of_evidence), ' '.join(list_of_ans))
     return scores['rougeL'].fmeasure
 #-------------------------------------------------------------------------
 
-@st.cache
+# @st.cache
 def rephrase_question_who(question):
     if not question.lower().startswith("who"):
         words = question.split()
@@ -409,7 +409,7 @@ def gen_qa_who(df):
         rouge_l_scores="Not verifiable"
     return list_of_ques_who,list_of_ans_who,rouge_l_scores,list_of_evidence_answer_who
 #------------------------------------------------------------
-@st.cache
+# @st.cache
 def rephrase_question_what(question):
     if not question.lower().startswith("what"):
         words = question.split()
@@ -470,7 +470,7 @@ def gen_qa_what(df):
         rouge_l_scores="Not verifiable"
     return list_of_ques_what,list_of_ans_what,rouge_l_scores,list_of_evidence_answer_what
 #----------------------------------------------------------
-@st.cache
+# @st.cache
 def rephrase_question_why(question):
     if not question.lower().startswith("why"):
         words = question.split()
@@ -532,7 +532,7 @@ def gen_qa_why(df):
     return list_of_ques_why,list_of_ans_why,rouge_l_scores,list_of_evidence_answer_why
 
 #---------------------------------------------------------
-@st.cache
+# @st.cache
 def rephrase_question_when(question):
     if not question.lower().startswith("when"):
         words = question.split()
@@ -593,7 +593,7 @@ def gen_qa_when(df):
     return list_of_ques_when,list_of_ans_when,rouge_l_scores,list_of_evidence_answer_when
 
 #------------------------------------------------------
-@st.cache
+# @st.cache
 def rephrase_question_where(question):
     if not question.lower().startswith("where"):
         words = question.split()
@@ -675,6 +675,9 @@ def gen_qa_where(df):
 # # st.table(final_df)
 
 if claim_text and evidence_text:
+    st.write("You entered: ", claim_text)
+    st.write("You entered: ", evidence_text)
+    st.caption(':green[Kindly hold on for a few minutes while the QA pairs are being generated]')
     df=claim(claim_text)
     df["evidence"]=evidence_text
     final_df = pd.DataFrame(columns=['Who Claims', 'What Claims', 'When Claims', 'Where Claims', 'Why Claims'])