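"""Log aggregated evaluation results to Weights & Biases, plotted against tokens seen.

Each input file must be named `<experiment>_agg.json` and contain a "tokens" list
plus a "results" mapping of per-task metric series (see the sketch above `main`).
Per-task metrics and/or several cross-task averages are logged to one W&B run per
experiment.

Example invocation (the script name is illustrative):

    python plot_results.py --input_files run1_agg.json,run2_agg.json \
        --all_tasks --acc_average --normalised_acc_average
"""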
import argparse
import json
import os

import numpy as np
import wandb

# Chance-level score for each task, used to rescale metrics so that random
# guessing maps to 0 and a perfect score maps to 1 (see `normalise` below).
RANDOM_BASELINE = {
    "arc_challenge": 0.2502, # Source: https://arxiv.org/pdf/1803.05457.pdf table 6
    "arc_easy": 0.2502, # Source: https://arxiv.org/pdf/1803.05457.pdf table 6
    "boolq": 0.5,
    "copa": 0.5,
    "headqa_en": 0.25,
    "hellaswag": 0.25,
    "lambada": 0., # Safe to say that random models won't perform well at all.
    "logiqa": 0.25,
    "mathqa": (4360 * 1/ 5 - (4475 - 4360) * 1/ 4) / 4475,
    "mrpc": 0.5,
    "multirc": 0., # TODO: I couldn't figure it out
    "openbookqa": 0.25,
    "piqa": 0.5,
    "prost": 0.25,
    "pubmedqa": 1/3,
    "qnli": 0.5,
    "qqp": 0.5,
    "race": 0.25, # Source: https://arxiv.org/pdf/1704.04683.pdf table 5
    "rte": 0.5,
    "sciq": 0.25,
    "sst": 0.5,
    "triviaqa": 0.,
    "webqs": 0.,
    "wic": 0.5,
    "winogrande": 0.5,
    "wnli": 0.5,
    "wsc": 0.5
}

def normalise(score, task):
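    """Rescale `score` so that chance level maps to 0 and a perfect score to 1.

    For example, normalise(0.5, "hellaswag") == (0.5 - 0.25) / (1 - 0.25) ≈ 0.333.
    """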
    return (score - RANDOM_BASELINE[task]) / (1. - RANDOM_BASELINE[task])

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_files", type=lambda s: s.split(','), required=True,
                        help="Comma-separated list of <experiment>_agg.json result files.")
    parser.add_argument("--all_tasks", action="store_true",
                        help="Log every individual task/metric series.")
    parser.add_argument("--naive_average", action="store_true",
                        help="Log the mean of all non-stderr, non-perplexity metrics.")
    parser.add_argument("--acc_average", action="store_true",
                        help="Log the mean accuracy across tasks.")
    parser.add_argument("--normalised_acc_average", action="store_true",
                        help="Log the mean chance-normalised accuracy across tasks.")
    return parser.parse_args()
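
# Expected shape of each *_agg.json input, inferred from how `main` reads it:
# {
#     "tokens": [...],                 # tokens seen at each evaluation step
#     "results": {
#         "<task>": {"acc": [...], "acc_stderr": [...], ...}  # one value per step
#     }
# }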

def main():
    args = parse_args()
    for input_file in args.input_files:
        # The experiment name is the file name without the `_agg.json` suffix.
        assert os.path.basename(input_file).endswith("_agg.json"), \
            f"Expected a file named <experiment>_agg.json, got {input_file}"
        experiment_name = os.path.basename(input_file).split("_agg.json")[0]
        with open(input_file, "r") as fi:
            experiment = json.load(fi)

        results = experiment["results"]
        tokens = experiment["tokens"]
        # One W&B run per experiment; reinit=True allows creating several runs
        # from the same process.
        run = wandb.init(project="bigscience-tr3-evaluation-through-training", entity="timerobber",
                         name=experiment_name, reinit=True)
        for i, n_tokens in enumerate(tokens):
            all_values = []
            acc_average = []
            normalised_acc_average = []
            for task, task_results in results.items():
                for metric, values in task_results.items():
                    if args.all_tasks:
                        wandb.log({f"{task}_{metric}": values[i], "tokens": n_tokens})
                    # Skip error bars and perplexities: only accuracy-style
                    # metrics are comparable enough to average across tasks.
                    if "stderr" not in metric and "ppl" not in metric:
                        all_values.append(values[i])
                        if metric == "acc":
                            acc_average.append(values[i])
                            normalised_acc_average.append(normalise(values[i], task))
            if args.naive_average:
                wandb.log({"naive_average": np.mean(all_values), "tokens": n_tokens})
            if args.acc_average:
                wandb.log({"acc_average": np.mean(acc_average), "tokens": n_tokens})
            if args.normalised_acc_average:
                wandb.log({"normalised_acc_average": np.mean(normalised_acc_average), "tokens": n_tokens})

        run.finish()

if __name__ == "__main__":
    main()