Commit · a457627
1 Parent(s): 75eaa7d
Update app.py
app.py
CHANGED
@@ -17,44 +17,6 @@ def get_prediction(context, question):
     answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs['input_ids'][0][answer_start:answer_end]))
 
     return answer
-
-def normalize_text(s):
-    """Removing articles and punctuation, and standardizing whitespace are all typical text processing steps."""
-    import string, re
-    def remove_articles(text):
-        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
-        return re.sub(regex, " ", text)
-    def white_space_fix(text):
-        return " ".join(text.split())
-    def remove_punc(text):
-        exclude = set(string.punctuation)
-        return "".join(ch for ch in text if ch not in exclude)
-    def lower(text):
-        return text.lower()
-
-    return white_space_fix(remove_articles(remove_punc(lower(s))))
-
-def exact_match(prediction, truth):
-    return bool(normalize_text(prediction) == normalize_text(truth))
-
-def compute_f1(prediction, truth):
-    pred_tokens = normalize_text(prediction).split()
-    truth_tokens = normalize_text(truth).split()
-
-    # if either the prediction or the truth is no-answer then f1 = 1 if they agree, 0 otherwise
-    if len(pred_tokens) == 0 or len(truth_tokens) == 0:
-        return int(pred_tokens == truth_tokens)
-
-    common_tokens = set(pred_tokens) & set(truth_tokens)
-
-    # if there are no common tokens then f1 = 0
-    if len(common_tokens) == 0:
-        return 0
-
-    prec = len(common_tokens) / len(pred_tokens)
-    rec = len(common_tokens) / len(truth_tokens)
-
-    return round(2 * (prec * rec) / (prec + rec), 2)
 
 def question_answer(context, question):
     prediction = get_prediction(context,question)
@@ -63,9 +25,9 @@ def question_answer(context, question):
 def greet(texts):
     question = texts[:len(texts)]
     answer = texts[len(texts):]
-    for question, answer in texts:
-
-        return
+    # for question, answer in texts:
+    #     question_answer(context, question)
+    return answer
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 iface.launch()
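Note: this commit deletes the SQuAD-style evaluation helpers (normalize_text, exact_match, compute_f1). For reference, a minimal self-contained sketch of what the removed metric computes; the logic mirrors the parent revision 75eaa7d, and the example strings are illustrative only.

import re
import string

# Restatement of the helpers removed in this commit, so the demo runs
# on its own; behavior mirrors the parent revision.
def normalize_text(s):
    """Lowercase, drop punctuation and articles, collapse whitespace."""
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def compute_f1(prediction, truth):
    pred_tokens = normalize_text(prediction).split()
    truth_tokens = normalize_text(truth).split()
    # No-answer case: F1 is 1 only when both sides are empty.
    if not pred_tokens or not truth_tokens:
        return int(pred_tokens == truth_tokens)
    common = set(pred_tokens) & set(truth_tokens)
    if not common:
        return 0
    prec = len(common) / len(pred_tokens)
    rec = len(common) / len(truth_tokens)
    return round(2 * prec * rec / (prec + rec), 2)

print(compute_f1("The cat sat on the mat.", "a cat sat on a mat"))  # 1.0
print(compute_f1("Paris", "London"))                                 # 0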
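Note: as committed, greet returns texts[len(texts):], which is always the empty string, so the Space echoes nothing. A minimal sketch of one way to wire the single textbox through to question_answer; the "||" separator convention is a hypothetical choice for illustration, not part of this commit.

import gradio as gr

# Sketch only: assumes question_answer (and get_prediction) from app.py
# are defined above in the same file, and that question_answer returns
# the predicted answer string.
def greet(texts):
    # Hypothetical convention: one textbox holding "context || question".
    context, _, question = texts.partition("||")
    return question_answer(context.strip(), question.strip())

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()

Using two input components instead, e.g. gr.Interface(fn=question_answer, inputs=["text", "text"], outputs="text"), would avoid the separator convention entirely.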