import collections
import re
import string

import datasets
import evaluate


def normalize_answer(s):
    """Lowercase text and remove punctuation, articles, and extra whitespace."""

    def remove_articles(text):
        # Strip the French articles: un, une, des, le, la, les.
        regex = re.compile(r"\b(un|une|des|le|la|les)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
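
# Illustrative behavior (example is mine, not from the source): lowercasing,
# punctuation stripping, and French-article removal compose, so
#     normalize_answer("La Tour Eiffel !")  ->  "tour eiffel"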


def get_tokens(s):
    # Whitespace tokenization after normalization; empty/None input gives [].
    if not s:
        return []
    return normalize_answer(s).split()


def exact(predictions, references):
    # SQuAD-style exact match on the first prediction/reference pair.
    return int(normalize_answer(references[0]) == normalize_answer(predictions[0]))
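
# Illustrative behavior (example is mine, not from the source): normalization
# makes the match case- and punctuation-insensitive, so
#     exact(["Paris !"], ["paris"])  ->  1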


def f1(predictions, references):
    # SQuAD-style token-level F1 between the first reference and prediction.
    gold_toks = get_tokens(references[0])
    pred_toks = get_tokens(predictions[0])
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # F1 is 1 when both token lists are empty and 0 when only one is.
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
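
# Worked example (mine, not from the source): the gold "Paris est la capitale"
# normalizes to ["paris", "est", "capitale"] and the prediction "capitale paris"
# to ["capitale", "paris"]; precision = 2/2, recall = 2/3, so
#     f1(["capitale paris"], ["Paris est la capitale"])  ->  0.8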


def rouge1(items):
    """Passthrough for efficiency."""
    return items


def rouge1_agg(items):
    """Aggregate ROUGE-1 over all (reference, prediction) pairs; higher is better."""
    refs, preds = zip(*items)
    rouge_scorer = evaluate.load("rouge")
    return rouge_scorer.compute(predictions=preds, references=refs)["rouge1"]
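
# Usage sketch (assumption based on the unpacking above: each item is a
# (reference, prediction) pair): something like
#     rouge1_agg([("le chat dort", "le chat dort"), ("un chien", "une tortue")])
# yields the aggregated ROUGE-1 score from the Hugging Face `evaluate` rouge
# metric. Note evaluate.load("rouge") loads the metric each time rouge1_agg
# runs, which is acceptable for a single aggregation step.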


def is_included(items):
    """Return True if the reference (items[0]) appears verbatim in the prediction (items[1])."""
    return items[0] in items[1]
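
# Illustrative behavior (examples are mine, not from the source): this is a
# plain case-sensitive substring test with no normalization, so
#     is_included(("paris", "La ville de paris"))  ->  True
#     is_included(("Paris", "la ville de paris"))  ->  False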


def preprocess(text):
    text = text.strip()
    # Turn " [title]" markers into sentence breaks, drop any remaining
    # bracketed tags, and collapse the double spaces left behind.
    text = text.replace(" [title]", ". ")
    text = re.sub(r"\[.*?\]", "", text)
    text = text.replace("  ", " ")
    return text
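
# Illustrative behavior (example is mine, not from the source):
#     preprocess("How to swim [title] Dive in [header] slowly.")
#     ->  "How to swim. Dive in slowly."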


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        # HellaSwag-style formatting: the query is the activity label plus the
        # two context halves; each candidate ending is cleaned with preprocess.
        ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        out_doc = {
            "query": preprocess(doc["activity_label"] + ": " + ctx),
            "choices": [preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    return dataset.map(_process_doc)
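
# Usage sketch (field values are mine, not from the source): for a
# HellaSwag-style row such as {"activity_label": "Natation",
# "ctx_a": "Il plonge.", "ctx_b": "il nage", "endings": [...], "label": "0"},
# the mapped dataset gains "query", "choices", and "gold" columns alongside
# the original ones.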