"""Helpers for the BasqueGLUE tasks: prompt construction, detokenization, and F1 metrics."""
import html
import re

# Note: datasets.load_metric is deprecated in recent versions of `datasets`;
# evaluate.load("f1") from the `evaluate` library is the recommended replacement.
from datasets import load_metric


def general_detokenize(string):
    # Remove spaces before punctuation: "kaixo ," -> "kaixo,"
    string = re.sub(r"\s+([.,;:!?)])", r"\1", string)
    # Remove the padding spaces inside (...), [...], "..." and '...'
    string = re.sub(r"(\s+|^)\(\s+([^)]+)\s+\)", r"\1(\2)", string)
    string = re.sub(r"(\s+|^)\[\s+([^\]]+)\s+\]", r"\1[\2]", string)
    string = re.sub(r'(\s+|^)"\s+([^"]+)\s+"', r'\1"\2"', string)
    string = re.sub(r"(\s+|^)'\s+([^']+)\s+'", r"\1'\2'", string)
    return string


def process_doc(string):
    # Unescape HTML entities (e.g. "&amp;" -> "&") and undo tokenizer spacing.
    string = html.unescape(string)
    string = general_detokenize(string)
    return string
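# Illustrative example (hypothetical input, not taken from the data): HTML
# entities are unescaped and tokenizer spacing is undone, e.g.
#   process_doc('Zer moduz &quot; lagun &quot; ?')  ->  'Zer moduz "lagun"?'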


def process_wic_docs(dataset):
    def _helper(doc):
        # The WiC sentences appear to be mojibake (UTF-8 text that was decoded
        # as Latin-1, e.g. "Ã©" instead of "é"); the encode/decode round trip
        # below recovers the original characters.
        doc["sentence1"] = (
            process_doc(doc["sentence1"]).encode("latin-1").decode("utf-8")
        )
        doc["sentence2"] = (
            process_doc(doc["sentence2"]).encode("latin-1").decode("utf-8")
        )
        return doc

    return dataset.map(_helper)


def coref_doc_to_text(x):
    def _span_in_context(span_index, span_text):
        # Wrap the mention in asterisks, mutating the enclosing `tokens` list.
        span_start = span_index
        span_end = span_start + len(span_text.split(" ")) - 1
        tokens[span_start] = f"*{tokens[span_start]}"
        tokens[span_end] = f"{tokens[span_end]}*"

    tokens = x["text"].split(" ")
    _span_in_context(x["span1_index"], x["span1_text"])
    _span_in_context(
        x["span2_index"] - 1, x["span2_text"]
    )  # span1_index is 0-based, but span2_index appears to be 1-based
    context = process_doc(" ".join(tokens))
    span_1 = process_doc(x["span1_text"])
    span_2 = process_doc(x["span2_text"])
    text = (
        f"Testua: {context}\n"
        + f'Galdera: Aurreko testuan, "*{span_1}*" eta "*{span_2}*" gauza bera dira?\n'
        + "Erantzuna:"
    )
    return text
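# Illustrative example (hypothetical document, not taken from the data):
#   coref_doc_to_text({
#       "text": "Mikel etorri da eta bera pozik dago",
#       "span1_index": 0, "span1_text": "Mikel",
#       "span2_index": 5, "span2_text": "bera",
#   })
# returns the prompt:
#   Testua: *Mikel* etorri da eta *bera* pozik dago
#   Galdera: Aurreko testuan, "*Mikel*" eta "*bera*" gauza bera dira?
#   Erantzuna: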


# Measure F1 as in the benchmark repo: https://github.com/orai-nlp/BasqueGLUE/blob/main/eval_basqueglue.py


def micro_f1_score(items):
    # `items` is an iterable of (gold, prediction) pairs.
    f1_metric = load_metric("f1")
    golds, preds = list(zip(*items))
    f1_score = f1_metric.compute(references=golds, predictions=preds, average="micro")[
        "f1"
    ]
    return f1_score
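# Illustrative example (hypothetical predictions): with one label per item,
# micro-averaged F1 equals plain accuracy, e.g.
#   micro_f1_score([(1, 1), (0, 1), (2, 2)])  ->  0.666... (2 of 3 correct)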


def vaxx_f1_score(items):
    f1_metric = load_metric("f1")
    golds, preds = list(zip(*items))
    # Per-class F1 for labels 0 and 2 only (macro-averaged below); label 1,
    # presumably the neutral stance, is excluded as in the official script.
    f1_class = f1_metric.compute(
        references=golds, predictions=preds, labels=[0, 2], average=None
    )["f1"]
    f1_score = sum(f1_class) / len(f1_class)
    return f1_score
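# Illustrative example (hypothetical predictions): only classes 0 and 2 count.
# Below, class 0 is perfect (F1 = 1.0) and class 2 has one false positive and
# one false negative (F1 = 0.5), so the reported score is their mean:
#   vaxx_f1_score([(0, 0), (1, 2), (2, 2), (2, 1)])  ->  0.75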