Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- lm-evaluation/lm_eval/api/__init__.py +0 -0
- lm-evaluation/lm_eval/api/__pycache__/__init__.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/filter.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/instance.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/metrics.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/model.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/registry.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/samplers.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/__pycache__/task.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/api/filter.py +56 -0
- lm-evaluation/lm_eval/api/instance.py +38 -0
- lm-evaluation/lm_eval/api/metrics.py +509 -0
- lm-evaluation/lm_eval/api/model.py +346 -0
- lm-evaluation/lm_eval/api/registry.py +172 -0
- lm-evaluation/lm_eval/api/samplers.py +114 -0
- lm-evaluation/lm_eval/api/task.py +1498 -0
- lm-evaluation/lm_eval/caching/cache.py +55 -0
- lm-evaluation/lm_eval/decontamination/__init__.py +0 -0
- lm-evaluation/lm_eval/decontamination/archiver.py +171 -0
- lm-evaluation/lm_eval/decontamination/decontaminate.py +166 -0
- lm-evaluation/lm_eval/decontamination/janitor.py +328 -0
- lm-evaluation/lm_eval/filters/__init__.py +48 -0
- lm-evaluation/lm_eval/filters/__pycache__/__init__.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/filters/__pycache__/extraction.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/filters/__pycache__/selection.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/filters/__pycache__/transformation.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/filters/decontamination.py +24 -0
- lm-evaluation/lm_eval/filters/extraction.py +183 -0
- lm-evaluation/lm_eval/filters/selection.py +52 -0
- lm-evaluation/lm_eval/filters/transformation.py +52 -0
- lm-evaluation/lm_eval/models/__pycache__/__init__.cpython-310.pyc +0 -0
- lm-evaluation/lm_eval/tasks/kormedmcqa/README.md +47 -0
- lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml +27 -0
- lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml +27 -0
- lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml +27 -0
- lm-evaluation/lm_eval/tasks/pile/README.md +68 -0
- lm-evaluation/lm_eval/tasks/pile/pile_arxiv.yaml +23 -0
- lm-evaluation/lm_eval/tasks/pile/pile_bookcorpus2.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_books3.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_dm-mathematics.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_freelaw.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_github.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_gutenberg.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_hackernews.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_nih-exporter.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_opensubtitles.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_openwebtext2.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_philpapers.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_pile-cc.yaml +3 -0
- lm-evaluation/lm_eval/tasks/pile/pile_pubmed-abstracts.yaml +3 -0
lm-evaluation/lm_eval/api/__init__.py
ADDED
File without changes
lm-evaluation/lm_eval/api/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (157 Bytes)
lm-evaluation/lm_eval/api/__pycache__/filter.cpython-310.pyc
ADDED
Binary file (2.71 kB)
lm-evaluation/lm_eval/api/__pycache__/instance.cpython-310.pyc
ADDED
Binary file (1.51 kB)
lm-evaluation/lm_eval/api/__pycache__/metrics.cpython-310.pyc
ADDED
Binary file (12.3 kB)
lm-evaluation/lm_eval/api/__pycache__/model.cpython-310.pyc
ADDED
Binary file (12.1 kB)
lm-evaluation/lm_eval/api/__pycache__/registry.cpython-310.pyc
ADDED
Binary file (4.56 kB)
lm-evaluation/lm_eval/api/__pycache__/samplers.cpython-310.pyc
ADDED
Binary file (3.49 kB)
lm-evaluation/lm_eval/api/__pycache__/task.cpython-310.pyc
ADDED
Binary file (39.4 kB)
lm-evaluation/lm_eval/api/filter.py
ADDED
@@ -0,0 +1,56 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, Iterable, List, Union

from lm_eval.api.instance import Instance


class Filter(ABC):
    """
    Filter classes operate on a per-task level.
    They take all model outputs (`instance.resps` for all `task.instances`)
    across all instances of a task, and perform operations.
    In a single run, one can configure any number of separate filters or lists of filters.

    """

    def __init__(self, **kwargs) -> None:
        """
        Can define custom behavior here, if an individual instantiation of a Filter class should have state.
        """

    @abstractmethod
    def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable:
        """
        Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects.
        Should return the list of (filtered) response lists *in the same order as they were input*, e.g.
        if pass in [<inst.resps for instance 0>, <inst.resps for instance 1>] should return
        [<filtered resps for instance 0>, <filtered resps for instance 1>]
        """
        return resps


@dataclass
class FilterEnsemble:
    """
    FilterEnsemble creates a pipeline applying multiple filters.
    Its intended usage is to stack multiple post-processing steps in order.
    `task.apply_filters` should use a list of FilterEnsemble classes that it stores, to apply each
    pipeline separately.
    """

    name: str
    filters: List[Callable[[], Filter]]

    def apply(self, instances: List[Instance]) -> None:
        resps, docs = zip(*((inst.resps, inst.doc) for inst in instances))
        resps, docs = list(resps), list(docs)

        for f in self.filters:
            # apply filters in sequence
            resps = f().apply(resps, docs)

        # add the end results after filtering to filtered_requests of their respective source instances.
        # has key `self.name`: each FilterEnsemble applied in a given run should use a different name.
        for inst, resp in zip(instances, resps):
            inst.filtered_resps[self.name] = resp
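Illustrative sketch (not part of the diff above): a minimal example of how the `Filter` / `FilterEnsemble` contract could be exercised. The `SimpleNamespace` stand-in for `Instance` and the `StripFilter` class are assumptions made for brevity; real code would use `lm_eval.api.instance.Instance`.

# --- illustrative usage sketch, not part of the diff ---
from types import SimpleNamespace

from lm_eval.api.filter import Filter, FilterEnsemble


class StripFilter(Filter):
    """Hypothetical filter: strip whitespace from every response string."""

    def apply(self, resps, docs):
        # preserve per-instance ordering: one filtered list per input instance
        return [[r.strip() for r in instance_resps] for instance_resps in resps]


# toy stand-ins for Instance objects, each with resps / doc / filtered_resps
instances = [
    SimpleNamespace(resps=[" 42 \n"], doc={"q": "6*7?"}, filtered_resps={}),
    SimpleNamespace(resps=["hello "], doc={"q": "greet"}, filtered_resps={}),
]

ensemble = FilterEnsemble(name="strip", filters=[StripFilter])
ensemble.apply(instances)
print(instances[0].filtered_resps)  # {'strip': ['42']}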
lm-evaluation/lm_eval/api/instance.py
ADDED
@@ -0,0 +1,38 @@
from dataclasses import dataclass, field
from typing import Literal, Optional, Tuple


OutputType = Literal[
    "loglikelihood", "loglikelihood_rolling", "generate_until", "multiple_choice"
]


@dataclass
class Instance:
    request_type: OutputType
    doc: dict
    arguments: tuple
    idx: int
    metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field(
        default_factory=lambda: (None, None, None)
    )
    resps: list = field(default_factory=list)
    filtered_resps: dict = field(default_factory=dict)

    # initialized after init
    task_name: Optional[str] = None
    doc_id: Optional[int] = None
    repeats: Optional[int] = None

    def __post_init__(self) -> None:
        # unpack metadata field
        self.task_name, self.doc_id, self.repeats = self.metadata

    @property
    def args(self):
        """
        Returns (string,) where `string` is the string to calculate loglikelihood over
        """
        return (
            self.arguments if isinstance(self.arguments, tuple) else (self.arguments,)
        )
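Illustrative sketch (not part of the diff above): constructing an `Instance` directly to show how `metadata` is unpacked in `__post_init__` and how `args` always yields a tuple. The task name and ids used are placeholder values.

# --- illustrative usage sketch, not part of the diff ---
from lm_eval.api.instance import Instance

inst = Instance(
    request_type="loglikelihood",
    doc={"question": "2+2?", "answer": "4"},
    arguments=("Q: 2+2?\nA:", " 4"),  # (context, continuation)
    idx=0,
    metadata=("demo_task", 7, 1),     # unpacked into task_name, doc_id, repeats
)

print(inst.task_name, inst.doc_id, inst.repeats)  # demo_task 7 1
print(inst.args)  # ('Q: 2+2?\nA:', ' 4') -- args normalizes arguments to a tuple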
lm-evaluation/lm_eval/api/metrics.py
ADDED
@@ -0,0 +1,509 @@
import logging
import math
import random
from collections.abc import Iterable
from typing import List

import evaluate as hf_evaluate
import numpy as np
import sacrebleu
import sklearn.metrics

from lm_eval.api.registry import register_aggregation, register_metric


eval_logger = logging.getLogger("lm-eval")


# Register Aggregations First
@register_aggregation("bypass")
def bypass_agg(arr):
    return 999


@register_aggregation("mean")
def mean(arr):
    return sum(arr) / len(arr)


@register_aggregation("median")
def median(arr):
    return arr[len(arr) // 2]


# Certain metrics must be calculated across all documents in a benchmark.
# We use them as aggregation metrics, paired with no-op passthrough metric fns.
@register_aggregation("perplexity")
def perplexity(items):
    return math.exp(-mean(items))


@register_aggregation("weighted_perplexity")
def weighted_perplexity(items):
    return math.exp(-weighted_mean(items))


@register_aggregation("bits_per_byte")
def bits_per_byte(items):
    return -weighted_mean(items) / math.log(2)


@register_aggregation("f1")
def f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = sklearn.metrics.f1_score(golds, preds)

    return np.max(fscore)


@register_aggregation("matthews_corrcoef")
def matthews_corrcoef(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    # print(preds)
    return sklearn.metrics.matthews_corrcoef(golds, preds)


@register_aggregation("bleu")
def bleu(items):
    """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
    for evaluating a generated sentence to a reference sentence. It counts matching
    n-grams in the candidate translation to n-grams in the reference text, where
    1-gram or unigram would be each token and a bigram comparison would be each
    word pair. The comparison is made regardless of word order
    Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
    Paper: https://www.aclweb.org/anthology/P02-1040/

    Higher is better
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_bleu(preds, refs).score


@register_aggregation("chrf")
def chrf(items):
    """chrF++ is a tool for automatic evaluation of machine translation output
    based on character n-gram precision and recall enhanced with word n-grams.
    Source: https://github.com/m-popovic/chrF
    Paper: https://www.aclweb.org/anthology/W15-3049.pdf

    Higher is better  # TODO I think
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_chrf(preds, refs).score


@register_aggregation("ter")
def ter(items):
    """Translation Error Rate is an error metric for machine translation that
    measures the number of edits required to change a system output into one
    of the references
    Source: http://www.cs.umd.edu/~snover/tercom/
    Paper: http://mt-archive.info/AMTA-2006-Snover.pdf

    Lower is better
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_ter(preds, refs).score


@register_aggregation("brier_score")
def brier_score(items):  # This is a passthrough function
    gold, predictions = list(zip(*items))
    gold = list(gold)
    gold_one_hot = np.eye(np.max(gold) + 1)[gold]
    predictions = list(zip(*items))[1]
    return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1))


@register_metric(
    metric="brier_score",
    higher_is_better=False,
    output_type=["multiple_choice"],
    aggregation="brier_score",
)
def brier_score_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_norm",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_norm_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_mutual_info",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="mean",
)
def acc_mutual_info_fn(items):  # This is a passthrough function
    return items


exact_match = hf_evaluate.load("exact_match")


@register_metric(
    metric="exact_match",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="mean",
)
def exact_match_fn(**kwargs):
    return exact_match.compute(**kwargs)


@register_metric(
    metric="perplexity",
    higher_is_better=False,
    output_type="loglikelihood",
    aggregation="perplexity",
)
def perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="word_perplexity",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="weighted_perplexity",
)
def word_perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="byte_perplexity",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="weighted_perplexity",
)
def byte_perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="bits_per_byte",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="bits_per_byte",
)
def bits_per_byte_fn(items):  # This is a passthrough function
    return items


def pop_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))


def sample_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))


def mean_stderr(arr):
    return sample_stddev(arr) / math.sqrt(len(arr))


@register_metric(
    metric="bypass",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice", "generate_until"],
    aggregation="bypass",
)
def bypass(items):
    return None


@register_metric(
    metric="mcc",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="matthews_corrcoef",
)
def mcc_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="f1",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="f1",
)
def f1_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="bleu",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="bleu",
)
def bleu_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="chrf",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="chrf",
)
def chrf_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="ter",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="ter",
)
def ter_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_all",
    higher_is_better=True,
    output_type="loglikelihood",
    aggregation="mean",
)
def acc_all(items):
    # Only count as correct if all answers are labeled correctly for each question
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]

    for doc, pred in zip(docs, preds):
        paragraph_id = doc["idx"]["paragraph"]
        question_id = doc["idx"]["question"]
        if (paragraph_id, question_id) not in question_scoring_dict:
            question_scoring_dict[(paragraph_id, question_id)] = []

        gold_label = doc["label"] == 1

        question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
    acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
    return acc


def acc_all_stderr(items):
    # Only count as correct if all answers are labeled correctly for each question
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]

    for doc, pred in zip(docs, preds):
        question_id = doc["idx"]["question"]
        if question_id not in question_scoring_dict:
            question_scoring_dict[question_id] = []

        gold_label = doc["label"] == 1
        question_scoring_dict[question_id].append(gold_label == pred)

    acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
    return acc


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def weighted_mean(items):
    a, b = zip(*items)
    return sum(a) / sum(b)


def is_non_str_iterable(obj):
    return isinstance(obj, Iterable) and not isinstance(obj, str)


def _sacreformat(refs, preds):
    """Format refs and preds for sacrebleu corpus calculation. It is very particular"""
    # Sacrebleu expects (List[str], List[List[str])
    # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])

    # Note [ref1_stream] is the first reference for each pred.
    # So lists are size N and (M, N) for N preds and M possible refs for each pred
    # This is a different order of dimensions that I would expect

    # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
    # Must become List[List[str]] with the inner list corresponding to preds
    if not is_non_str_iterable(refs):
        refs = list(refs)
    if not is_non_str_iterable(refs[0]):
        refs = [[ref] for ref in refs]
    refs = list(zip(*refs))
    # Note the number of refs in each ref list much match the number of preds

    # We expect preds to be List[str] or List[List[str]]. Must become List[str]
    if not is_non_str_iterable(preds):
        preds = list(preds)
    if is_non_str_iterable(preds[0]):
        assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
        preds = [pred[0] for pred in preds]

    return refs, preds


# stderr stuff


class _bootstrap_internal:
    def __init__(self, f, n) -> None:
        self.f = f
        self.n = n

    def __call__(self, v):
        i, xs = v
        rnd = random.Random()
        rnd.seed(i)
        res = []
        for _ in range(self.n):
            res.append(self.f(rnd.choices(xs, k=len(xs))))
        return res


def bootstrap_stderr(f, xs, iters):
    import multiprocessing as mp

    pool = mp.Pool(mp.cpu_count())
    # this gives a biased estimate of the stderr (i.e w/ the mean, it gives something
    # equivalent to stderr calculated without Bessel's correction in the stddev.
    # Unfortunately, I haven't been able to figure out what the right correction is
    # to make the bootstrap unbiased - i considered multiplying by sqrt(n/(n-1)) but
    # that would be ad-hoc and I can't prove that that would actually be an unbiased estimator)
    # Thankfully, shouldn't matter because our samples are pretty big usually anyways
    res = []
    chunk_size = min(1000, iters)
    from tqdm import tqdm

    print("bootstrapping for stddev:", f.__name__)
    for bootstrap in tqdm(
        pool.imap(
            _bootstrap_internal(f, chunk_size),
            [(i, xs) for i in range(iters // chunk_size)],
        ),
        total=iters // chunk_size,
    ):
        # sample w replacement
        res.extend(bootstrap)

    pool.close()
    return sample_stddev(res)


def stderr_for_metric(metric, bootstrap_iters):
    bootstrappable = [
        median,
        matthews_corrcoef,
        f1_score,
        perplexity,
        bleu,
        chrf,
        ter,
    ]

    if metric in bootstrappable:
        return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)

    stderr = {mean: mean_stderr, acc_all: acc_all_stderr}

    return stderr.get(metric, None)


def pooled_sample_stderr(stderrs: List[float], sizes: List[int]):
    # Used to aggregate bootstrapped stderrs across subtasks in a group,
    # when we are weighting by the size of each subtask.
    #

    assert len(stderrs) == len(sizes)

    # formula source: https://en.wikipedia.org/wiki/Pooled_variance
    # and: https://stats.stackexchange.com/a/4841331
    # this empirically seems to match running `stderr_for_metric` on all instances
    # from the subtasks concatenated with each other.
    pooled_sample_var = (
        sum([(size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)])
    ) / (sum(sizes) - len(sizes))

    return np.sqrt(pooled_sample_var / sum(sizes))


def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None):
    assert (
        metrics is not None
    ), "Need to pass a list of each subtask's metric for this stderr aggregation"
    assert len(stderrs) == len(sizes) and len(sizes) == len(metrics)

    # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1390 for more documentation.
    # This formula depends on sample means.
    # removed because it seems to give erroneously huge stderrs for groupings of tasks
    # and does not seem to match up with bootstrap-calculated stderrs for groups.

    ### don't use this unless a statistician has told you it's the right thing to do ###

    # accumulators: we'll aggregate pairwise N - 1 times
    variance = stderrs[0] ** 2
    curr_size = sizes[0]
    curr_score = metrics[0]

    for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]):
        curr_score = ((curr_score * curr_size) + (score * size)) / (
            curr_size + size
        )  # NOTE: this assumes our aggregation fn is "mean"

        variance = ((curr_size - 1) * variance + (size - 1) * (stderr**2)) / (
            curr_size + size - 1
        ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * (
            curr_score - score
        ) ** 2

    return np.sqrt(variance)


def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True):
    # A helper function that is used to aggregate
    # subtask scores cross-task.
    # TODO: does not hold for non-mean aggregations
    if not weight_by_size:
        sizes = [1] * len(sizes)

    assert len(metrics) == len(sizes)

    return sum([metric * size for metric, size in zip(metrics, sizes)]) / sum(sizes)
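Illustrative sketch (not part of the diff above): the `*_fn` passthrough metrics in this file defer all real work to corpus-level aggregations. The snippet below exercises that pattern with made-up numbers, assuming each item is a (summed log-likelihood, unit count) pair as produced for rolling-loglikelihood tasks.

# --- illustrative usage sketch, not part of the diff ---
import math

from lm_eval.api.metrics import bits_per_byte, pooled_sample_stderr, weighted_perplexity

# Each item: (sum of log-likelihoods for one document, number of units in that document).
items = [(-120.0, 100), (-250.0, 200)]

# weighted_perplexity = exp(-(sum of loglikelihoods) / (sum of unit counts))
print(weighted_perplexity(items))   # exp(370/300) ~= 3.43
print(math.exp(370.0 / 300.0))      # same value, computed directly

# bits_per_byte rescales the same weighted mean to log base 2
print(bits_per_byte(items))         # (370/300) / ln(2) ~= 1.78

# pooled_sample_stderr combines per-subtask stderrs, weighting by subtask size
print(pooled_sample_stderr([0.02, 0.03], [500, 300]))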
lm-evaluation/lm_eval/api/model.py
ADDED
@@ -0,0 +1,346 @@
import abc
import hashlib
import json
import logging
import os
from typing import List, Optional, Tuple, Type, TypeVar

import transformers
from sqlitedict import SqliteDict
from tqdm import tqdm

from lm_eval import utils


eval_logger = logging.getLogger("lm-eval")

T = TypeVar("T", bound="LM")


class LM(abc.ABC):
    def __init__(self) -> None:
        """Defines the interface that should be implemented by all LM subclasses.
        LMs are assumed to take text (strings) as input and yield strings as output
        (inputs/outputs should be tokenization-agnostic.)

        """
        # set rank and world size to a single process, by default.
        self._rank = 0
        self._world_size = 1
        self.cache_hook = CacheHook(None)

    @abc.abstractmethod
    def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list[Instance]
            A list of Instance objects, with property `args` which returns a tuple (context, continuation).
            `context: str`
                Context string. Implementations of LM must be able to handle an
                empty context string.
            `continuation: str`
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.

        :return: list[tuple[float, bool]]
            A list of pairs (logprob, isgreedy)
            `logprob: float`
                The log probability of `continuation`.
            `isgreedy`:
                Whether `continuation` would be generated by greedy sampling from `context`.
        """
        pass

    @abc.abstractmethod
    def loglikelihood_rolling(self, requests) -> List[Tuple[float]]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
        which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
        multiple chunks, the last input will still a full-sized context.
        Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: BOS/EOS
            Max context length: 4
            Resulting input/prediction pairs:

                INPUT:  BOS   0   1   2
                PRED:     0   1   2   3

                INPUT:    3   4   5   6
                PRED:     4   5   6   7

                INPUT:    5   6   7   8
                PRED:             8   9

            Observe that:
                1. Each token is predicted exactly once
                2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context,).
            string: str
                String for which we are computing overall loglikelihood
        :return: list[tuple[float]]
            A list of tuples (logprob,)
            logprob: float
                The log probability of `context` conditioned on the BOS/EOS token.
                Can also be overridden for custom cases by `prefix_token_id`.
        """
        pass

    # TODO: Add an optional max length
    @abc.abstractmethod
    def generate_until(self, requests) -> List[str]:
        """Generate greedily until a stopping sequence

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context, until).
            context: str
                Context string
            until: [str]
                The string sequences to generate until. These string sequences
                may each span across multiple tokens, or may be part of one token.
        :return: list[str]
            A list of strings continuation
            continuation: str
                The generated continuation.
        """
        pass

    @classmethod
    def create_from_arg_string(
        cls: Type[T], arg_string: str, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument string and additional config.

        Parameters:
        - arg_string: A string containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """
        additional_config = {} if additional_config is None else additional_config
        args = utils.simple_parse_args_string(arg_string)
        args2 = {k: v for k, v in additional_config.items() if v is not None}
        return cls(**args, **args2)

    @classmethod
    def create_from_arg_obj(
        cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given arg_obj

        Parameters:
        - arg_obj: A dict containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """

        additional_config = {} if additional_config is None else additional_config
        additional_config = {
            k: v for k, v in additional_config.items() if v is not None
        }

        return cls(**arg_dict, **additional_config)

    @property
    def rank(self):
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._rank

    @property
    def world_size(self):
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._world_size

    def set_cache_hook(self, cache_hook) -> None:
        self.cache_hook = cache_hook


### SQLite-based caching of LM responses
def hash_args(attr, args):
    dat = json.dumps([attr] + list(args))
    return hashlib.sha256(dat.encode("utf-8")).hexdigest()


class CacheHook:
    def __init__(self, cachinglm) -> None:
        if cachinglm is None:
            self.dbdict = None
            return

        self.dbdict = cachinglm.dbdict

    def add_partial(self, attr, req, res) -> None:
        if self.dbdict is None:
            return
        hsh = hash_args(attr, req)
        self.dbdict[hsh] = res


class CachingLM:
    def __init__(self, lm, cache_db) -> None:
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.

        :param lm: LM
            Underlying LM
        :param cache_db: str
            Path to cache db
        """
        self.lm = lm
        self.cache_db = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr):
        lm_attr = getattr(self.lm, attr)
        if not callable(lm_attr):
            return lm_attr

        def fn(requests):
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(
                f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
            )
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(
                            f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
                        )
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(
                f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
            )
            # actually run the LM on the requests that do not have cached results
            rem_res = getattr(self.lm, attr)(remaining_reqs)

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()

            return res

        return fn

    def get_cache_hook(self):
        return CacheHook(self)


class TemplateLM(LM):
    """
    A class acting as intermediary between the LM base class
    and boilerplate often included in other LM subclasses.
    """

    @property
    @abc.abstractmethod
    def eot_token_id(self):
        pass

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        return self.eot_token_id

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs):
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(self, requests, **kwargs):
        pass

    def _encode_pair(self, context, continuation):
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        model_class = getattr(self, "AUTO_MODEL_CLASS", None)

        if model_class == transformers.AutoModelForSeq2SeqLM:
            context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
        else:
            whole_enc = self.tok_encode(context + continuation)
            context_enc = self.tok_encode(context)

            context_enc_len = len(context_enc)
            continuation_enc = whole_enc[context_enc_len:]

        return context_enc, continuation_enc

    def loglikelihood(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # BOS or EOS as context
                context_enc, continuation_enc = (
                    [self.prefix_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)

    @abc.abstractmethod
    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        pass

    @abc.abstractmethod
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        pass
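Illustrative sketch (not part of the diff above): a hypothetical minimal `LM` subclass, only to show the shape of the interface and how `CachingLM` wraps it; real backends live under lm_eval/models/, and the cache path used here is an arbitrary placeholder.

# --- illustrative usage sketch, not part of the diff ---
from lm_eval.api.model import LM, CachingLM


class ConstantLM(LM):
    """Toy model: every continuation gets the same score, every generation is empty."""

    def loglikelihood(self, requests):
        return [(-1.0, False) for _ in requests]

    def loglikelihood_rolling(self, requests):
        return [(-1.0,) for _ in requests]

    def generate_until(self, requests):
        return ["" for _ in requests]


lm = ConstantLM()
# CachingLM memoizes responses in a SQLite file keyed by a hash of
# (method name, request args), and skips the cache for sampled generation.
cached_lm = CachingLM(lm, "lm_cache/constant.db")  # placeholder path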
lm-evaluation/lm_eval/api/registry.py
ADDED
@@ -0,0 +1,172 @@
import logging
from typing import Callable, Dict

import evaluate as hf_evaluate

from lm_eval.api.model import LM


eval_logger = logging.getLogger("lm-eval")

MODEL_REGISTRY = {}


def register_model(*names):
    # either pass a list or a single alias.
    # function receives them as a tuple of strings

    def decorate(cls):
        for name in names:
            assert issubclass(
                cls, LM
            ), f"Model '{name}' ({cls.__name__}) must extend LM class"

            assert (
                name not in MODEL_REGISTRY
            ), f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead."

            MODEL_REGISTRY[name] = cls
        return cls

    return decorate


def get_model(model_name):
    try:
        return MODEL_REGISTRY[model_name]
    except KeyError:
        raise ValueError(
            f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {', '.join(MODEL_REGISTRY.keys())}"
        )


TASK_REGISTRY = {}
GROUP_REGISTRY = {}
ALL_TASKS = set()
func2task_index = {}


def register_task(name):
    def decorate(fn):
        assert (
            name not in TASK_REGISTRY
        ), f"task named '{name}' conflicts with existing registered task!"

        TASK_REGISTRY[name] = fn
        ALL_TASKS.add(name)
        func2task_index[fn.__name__] = name
        return fn

    return decorate


def register_group(name):
    def decorate(fn):
        func_name = func2task_index[fn.__name__]
        if name in GROUP_REGISTRY:
            GROUP_REGISTRY[name].append(func_name)
        else:
            GROUP_REGISTRY[name] = [func_name]
            ALL_TASKS.add(name)
        return fn

    return decorate


OUTPUT_TYPE_REGISTRY = {}
METRIC_REGISTRY = {}
METRIC_AGGREGATION_REGISTRY = {}
AGGREGATION_REGISTRY: Dict[str, Callable[[], Dict[str, Callable]]] = {}
HIGHER_IS_BETTER_REGISTRY = {}

DEFAULT_METRIC_REGISTRY = {
    "loglikelihood": [
        "perplexity",
        "acc",
    ],
    "loglikelihood_rolling": ["word_perplexity", "byte_perplexity", "bits_per_byte"],
    "multiple_choice": ["acc", "acc_norm"],
    "generate_until": ["exact_match"],
}


def register_metric(**args):
    # TODO: do we want to enforce a certain interface to registered metrics?
    def decorate(fn):
        assert "metric" in args
        name = args["metric"]

        for key, registry in [
            ("metric", METRIC_REGISTRY),
            ("higher_is_better", HIGHER_IS_BETTER_REGISTRY),
            ("aggregation", METRIC_AGGREGATION_REGISTRY),
        ]:
            if key in args:
                value = args[key]
                assert (
                    value not in registry
                ), f"{key} named '{value}' conflicts with existing registered {key}!"

                if key == "metric":
                    registry[name] = fn
                elif key == "aggregation":
                    registry[name] = AGGREGATION_REGISTRY[value]
                else:
                    registry[name] = value

        return fn

    return decorate


def get_metric(name: str, hf_evaluate_metric=False) -> Callable:
    if not hf_evaluate_metric:
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        else:
            eval_logger.warning(
                f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
            )

    try:
        metric_object = hf_evaluate.load(name)
        return metric_object.compute
    except Exception:
        eval_logger.error(
            f"{name} not found in the evaluate library! Please check https://huggingface.co/evaluate-metric",
        )


def register_aggregation(name: str):
    def decorate(fn):
        assert (
            name not in AGGREGATION_REGISTRY
        ), f"aggregation named '{name}' conflicts with existing registered aggregation!"

        AGGREGATION_REGISTRY[name] = fn
        return fn

    return decorate


def get_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
    try:
        return AGGREGATION_REGISTRY[name]
    except KeyError:
        eval_logger.warning(f"{name} not a registered aggregation metric!")


def get_metric_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
    try:
        return METRIC_AGGREGATION_REGISTRY[name]
    except KeyError:
        eval_logger.warning(f"{name} metric is not assigned a default aggregation!")


def is_higher_better(metric_name) -> bool:
    try:
        return HIGHER_IS_BETTER_REGISTRY[metric_name]
    except KeyError:
        eval_logger.warning(
            f"higher_is_better not specified for metric '{metric_name}'!"
        )
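Illustrative sketch (not part of the diff above): registering a new metric against an existing aggregation via the decorators in this file. The metric name "length_ratio" is hypothetical, and the sketch assumes lm_eval.api.metrics has been imported so that built-in aggregations such as "mean" are already registered.

# --- illustrative usage sketch, not part of the diff ---
import lm_eval.api.metrics  # noqa: F401  -- registers built-in aggregations like "mean"
from lm_eval.api.registry import (
    get_metric,
    get_metric_aggregation,
    is_higher_better,
    register_metric,
)


@register_metric(
    metric="length_ratio",       # hypothetical metric name
    higher_is_better=False,
    output_type="generate_until",
    aggregation="mean",          # reuse the already-registered "mean" aggregation
)
def length_ratio_fn(items):  # passthrough, like the built-in metrics above
    return items


print(get_metric("length_ratio") is length_ratio_fn)  # True
print(get_metric_aggregation("length_ratio"))         # the registered mean function
print(is_higher_better("length_ratio"))               # False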
lm-evaluation/lm_eval/api/samplers.py
ADDED
@@ -0,0 +1,114 @@
class ContextSampler:
    def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None:
        self.rnd = rnd
        assert self.rnd, "must pass rnd to FewShotSampler!"

        self.task = task
        self.config = task._config

        self.target_delimiter = self.config.target_delimiter
        self.fewshot_delimiter = self.config.fewshot_delimiter

        self.doc_to_text = self.task.doc_to_text
        self.doc_to_target = self.task.doc_to_target
        self.doc_to_choice = self.task.doc_to_choice

        self.docs = docs  # HF dataset split, provided by task._fewshot_docs()
        if fewshot_indices:  # subset few-shot docs from
            self.docs = self.docs.select(fewshot_indices)

    def get_context(self, doc, num_fewshot):
        # draw an extra fewshot sample if using same split as evaluating on
        n_samples = (
            num_fewshot + 1
            if self.config.fewshot_split == self.config.test_split
            else num_fewshot
        )

        # draw `n_samples` docs from fewshot_docs
        fewshotex = self.sample(n_samples)

        # get rid of the doc that's the one we're evaluating, if it's in the fewshot
        # TODO: should we just stop people from using fewshot from same split as evaluating?
        selected_docs = [x for x in fewshotex if x != doc][:num_fewshot]

        labeled_examples = (
            self.fewshot_delimiter.join(
                [
                    # TODO: is separating doc_to_text and doc_to_target by one space always desired?
                    (
                        self.doc_to_text(doc)
                        if (
                            self.config.doc_to_choice is None
                            or isinstance(self.doc_to_text(doc), str)
                        )
                        else self.doc_to_choice(doc)[self.doc_to_text(doc)]
                    )
                    + self.target_delimiter
                    + (
                        str(self.doc_to_target(doc)[0])
                        if isinstance(self.doc_to_target(doc), list)
                        else self.doc_to_target(doc)
                        if (
                            self.config.doc_to_choice is None
                            or isinstance(self.doc_to_target(doc), str)
                        )
                        else str(self.doc_to_choice(doc)[self.doc_to_target(doc)])
                    )
                    for doc in selected_docs
                ]
            )
            + self.fewshot_delimiter
        )

        return labeled_examples

    def sample(self, n):
        """
        Draw `n` samples from our fewshot docs. This method should be overridden by subclasses.
        """

        return self.rnd.sample(self.docs, n)


class FirstNSampler(ContextSampler):
    def sample(self, n) -> None:
        """
        Draw the first `n` samples in order from the specified split.
        Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU.
        """
        assert (
            n <= len(self.docs)
        ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available."
        return self.docs[:n]


class BalancedSampler(ContextSampler):
    def sample(self, n) -> None:
        """
        TODO: this should return approximately class-balanced samples from our fewshot examples.
        TODO: what order should they be in? maybe random?
        """

        pass


class ManualSampler(ContextSampler):
    def sample(self, n) -> None:
        """ """
        pass


SAMPLER_REGISTRY = {
    "default": ContextSampler,
    "first_n": FirstNSampler,
}


def get_sampler(name):
    try:
        return SAMPLER_REGISTRY[name]
    except KeyError:
        raise ValueError(
            f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}"
        )
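Illustrative sketch (not part of the diff above): building a few-shot prefix with the default `ContextSampler`. The `SimpleNamespace` stand-ins for the task and its config are assumptions made for brevity; in real usage these come from a Task object and its TaskConfig.

# --- illustrative usage sketch, not part of the diff ---
import random
from types import SimpleNamespace

from lm_eval.api.samplers import get_sampler

docs = [
    {"question": "2+2?", "answer": "4"},
    {"question": "3+3?", "answer": "6"},
    {"question": "5+5?", "answer": "10"},
]

config = SimpleNamespace(
    target_delimiter=" ",
    fewshot_delimiter="\n\n",
    doc_to_choice=None,
    fewshot_split="train",
    test_split="test",
)
task = SimpleNamespace(
    _config=config,
    doc_to_text=lambda d: f"Q: {d['question']}\nA:",
    doc_to_target=lambda d: d["answer"],
    doc_to_choice=None,
)

sampler_cls = get_sampler("default")  # -> ContextSampler
sampler = sampler_cls(docs, task, rnd=random.Random(1234))
print(sampler.get_context(doc={"question": "7+7?"}, num_fewshot=2))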
lm-evaluation/lm_eval/api/task.py
ADDED
@@ -0,0 +1,1498 @@
1 |
+
import abc
|
2 |
+
import ast
|
3 |
+
import logging
|
4 |
+
import random
|
5 |
+
import re
|
6 |
+
from collections.abc import Callable
|
7 |
+
from copy import deepcopy
|
8 |
+
from dataclasses import asdict, dataclass
|
9 |
+
from inspect import getsource
|
10 |
+
from typing import (
|
11 |
+
Any,
|
12 |
+
Dict,
|
13 |
+
Iterable,
|
14 |
+
Iterator,
|
15 |
+
List,
|
16 |
+
Literal,
|
17 |
+
Mapping,
|
18 |
+
Optional,
|
19 |
+
Tuple,
|
20 |
+
Union,
|
21 |
+
)
|
22 |
+
|
23 |
+
import datasets
|
24 |
+
import numpy as np
|
25 |
+
from tqdm import tqdm
|
26 |
+
|
27 |
+
from lm_eval import utils
|
28 |
+
from lm_eval.api import samplers
|
29 |
+
from lm_eval.api.instance import Instance, OutputType
|
30 |
+
from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
|
31 |
+
from lm_eval.api.registry import (
|
32 |
+
AGGREGATION_REGISTRY,
|
33 |
+
DEFAULT_METRIC_REGISTRY,
|
34 |
+
get_aggregation,
|
35 |
+
get_metric,
|
36 |
+
get_metric_aggregation,
|
37 |
+
is_higher_better,
|
38 |
+
)
|
39 |
+
from lm_eval.caching.cache import load_from_cache, save_to_cache
|
40 |
+
from lm_eval.filters import build_filter_ensemble
|
41 |
+
from lm_eval.prompts import get_prompt
|
42 |
+
|
43 |
+
|
44 |
+
ALL_OUTPUT_TYPES = [
|
45 |
+
"loglikelihood",
|
46 |
+
"multiple_choice",
|
47 |
+
"loglikelihood_rolling",
|
48 |
+
"generate_until",
|
49 |
+
]
|
50 |
+
|
51 |
+
eval_logger = logging.getLogger("lm-eval")
|
52 |
+
|
53 |
+
|
54 |
+
@dataclass
|
55 |
+
class TaskConfig(dict):
|
56 |
+
# task naming/registry
|
57 |
+
task: Optional[str] = None
|
58 |
+
task_alias: Optional[str] = None
|
59 |
+
group: Optional[Union[str, list]] = None
|
60 |
+
group_alias: Optional[Union[str, list]] = None
|
61 |
+
# HF dataset options.
|
62 |
+
# which dataset to use,
|
63 |
+
# and what splits for what purpose
|
64 |
+
dataset_path: Optional[str] = None
|
65 |
+
dataset_name: Optional[str] = None
|
66 |
+
dataset_kwargs: Optional[dict] = None
|
67 |
+
training_split: Optional[str] = None
|
68 |
+
validation_split: Optional[str] = None
|
69 |
+
test_split: Optional[str] = None
|
70 |
+
fewshot_split: Optional[
|
71 |
+
str
|
72 |
+
] = None # TODO: assert that this is not None if num_fewshot > 0 (?); assert whether this is the same split as the one being evaluated (?)
|
73 |
+
# formatting / prompting options.
|
74 |
+
# see docs/advanced_task_guide.md for more info
|
75 |
+
process_docs: Optional[Callable] = None
|
76 |
+
doc_to_text: Optional[Union[Callable, str]] = None
|
77 |
+
doc_to_target: Optional[Union[Callable, str]] = None
|
78 |
+
doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
|
79 |
+
process_results: Optional[Union[Callable, str]] = None
|
80 |
+
use_prompt: Optional[str] = None
|
81 |
+
description: str = ""
|
82 |
+
target_delimiter: str = " "
|
83 |
+
fewshot_delimiter: str = "\n\n"
|
84 |
+
fewshot_config: Optional[dict] = None
|
85 |
+
# runtime configuration options
|
86 |
+
num_fewshot: Optional[int] = None
|
87 |
+
# scoring options
|
88 |
+
metric_list: Optional[list] = None
|
89 |
+
output_type: OutputType = "generate_until"
|
90 |
+
generation_kwargs: Optional[dict] = None
|
91 |
+
repeats: int = 1
|
92 |
+
filter_list: Optional[Union[str, list]] = None
|
93 |
+
should_decontaminate: bool = False
|
94 |
+
doc_to_decontamination_query: Optional[str] = None
|
95 |
+
metadata: Optional[
|
96 |
+
dict
|
97 |
+
] = None # by default, not used in the code. allows for users to pass arbitrary info to tasks
|
98 |
+
|
99 |
+
def __post_init__(self) -> None:
|
100 |
+
if self.generation_kwargs is not None:
|
101 |
+
if self.output_type != "generate_until":
|
102 |
+
raise ValueError(
|
103 |
+
f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
|
104 |
+
)
|
105 |
+
|
106 |
+
if "temperature" in self.generation_kwargs:
|
107 |
+
self.generation_kwargs["temperature"] = float(
|
108 |
+
self.generation_kwargs["temperature"]
|
109 |
+
)
|
110 |
+
|
111 |
+
if "until" not in self.generation_kwargs:
|
112 |
+
self.generation_kwargs["until"] = [self.fewshot_delimiter]
|
113 |
+
else:
|
114 |
+
if self.output_type == "generate_until":
|
115 |
+
# ensure that we greedily generate in absence of explicit arguments otherwise
|
116 |
+
self.generation_kwargs = {
|
117 |
+
"until": (
|
118 |
+
None
|
119 |
+
if self.fewshot_delimiter is None
|
120 |
+
else [self.fewshot_delimiter]
|
121 |
+
),
|
122 |
+
"do_sample": False,
|
123 |
+
}
|
124 |
+
|
125 |
+
def __getitem__(self, item):
|
126 |
+
return getattr(self, item)
|
127 |
+
|
128 |
+
def __setitem__(self, item, value):
|
129 |
+
return setattr(self, item, value)
|
130 |
+
|
131 |
+
def to_dict(self, keep_callable: bool = False) -> dict:
|
132 |
+
"""dumps the current config as a dictionary object, as a printable format.
|
133 |
+
null fields will not be printed.
|
134 |
+
Used for dumping results alongside full task configuration
|
135 |
+
|
136 |
+
:return: dict
|
137 |
+
A printable dictionary version of the TaskConfig object.
|
138 |
+
|
139 |
+
# TODO: should any default value in the TaskConfig not be printed?
|
140 |
+
"""
|
141 |
+
cfg_dict = asdict(self)
|
142 |
+
# remove values that are `None`
|
143 |
+
for k, v in list(cfg_dict.items()):
|
144 |
+
if v is None:
|
145 |
+
cfg_dict.pop(k)
|
146 |
+
elif k == "metric_list":
|
147 |
+
for metric_dict in v:
|
148 |
+
for metric_key, metric_value in metric_dict.items():
|
149 |
+
if callable(metric_value):
|
150 |
+
metric_dict[metric_key] = self.serialize_function(
|
151 |
+
metric_value, keep_callable=keep_callable
|
152 |
+
)
|
153 |
+
cfg_dict[k] = v
|
154 |
+
elif callable(v):
|
155 |
+
cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
|
156 |
+
return cfg_dict
|
157 |
+
|
158 |
+
def serialize_function(
|
159 |
+
self, value: Union[Callable, str], keep_callable=False
|
160 |
+
) -> Union[Callable, str]:
|
161 |
+
"""Serializes a given function or string.
|
162 |
+
|
163 |
+
If 'keep_callable' is True, the original callable is returned.
|
164 |
+
Otherwise, attempts to return the source code of the callable using 'getsource'.
|
165 |
+
"""
|
166 |
+
if keep_callable:
|
167 |
+
return value
|
168 |
+
else:
|
169 |
+
try:
|
170 |
+
return getsource(value)
|
171 |
+
except (TypeError, OSError):
|
172 |
+
return str(value)
|
173 |
+
|
174 |
+
|
175 |
+
class Task(abc.ABC):
|
176 |
+
"""A task represents an entire benchmark including its dataset, problems,
|
177 |
+
answers, and evaluation methods. See BoolQ for a simple example implementation
|
178 |
+
|
179 |
+
A `doc` can be any python object which represents one instance of evaluation.
|
180 |
+
This is usually a dictionary e.g.
|
181 |
+
{"question": ..., "answer": ...} or
|
182 |
+
{"question": ..., question, answer)
|
183 |
+
"""
|
184 |
+
|
185 |
+
VERSION: Optional[Union[int, str]] = None
|
186 |
+
|
187 |
+
# The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
|
188 |
+
# or a path to a custom `datasets` loading script.
|
189 |
+
DATASET_PATH: Optional[str] = None
|
190 |
+
|
191 |
+
# The name of a subset within `DATASET_PATH`.
|
192 |
+
DATASET_NAME: Optional[str] = None
|
193 |
+
|
194 |
+
OUTPUT_TYPE: Optional[OutputType] = None
|
195 |
+
|
196 |
+
def __init__(
|
197 |
+
self,
|
198 |
+
data_dir: Optional[str] = None,
|
199 |
+
cache_dir: Optional[str] = None,
|
200 |
+
download_mode: Optional[datasets.DownloadMode] = None,
|
201 |
+
config: Optional[Mapping] = None, # Union[dict, TaskConfig]
|
202 |
+
) -> None:
|
203 |
+
"""
|
204 |
+
:param data_dir: str
|
205 |
+
Stores the path to a local folder containing the `Task`'s data files.
|
206 |
+
Use this to specify the path to manually downloaded data (usually when
|
207 |
+
the dataset is not publicly accessible).
|
208 |
+
:param cache_dir: str
|
209 |
+
The directory to read/write the `Task` dataset. This follows the
|
210 |
+
HuggingFace `datasets` API with the default cache directory located at:
|
211 |
+
`~/.cache/huggingface/datasets`
|
212 |
+
NOTE: You can change the cache location globally for a given process
|
213 |
+
to another directory:
|
214 |
+
`export HF_DATASETS_CACHE="/path/to/another/directory"`
|
215 |
+
:param download_mode: datasets.DownloadMode
|
216 |
+
How to treat pre-existing `Task` downloads and data.
|
217 |
+
- `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
|
218 |
+
Reuse download and reuse dataset.
|
219 |
+
- `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
|
220 |
+
Reuse download with fresh dataset.
|
221 |
+
- `datasets.DownloadMode.FORCE_REDOWNLOAD`
|
222 |
+
Fresh download and fresh dataset.
|
223 |
+
"""
|
224 |
+
self.download(data_dir, cache_dir, download_mode)
|
225 |
+
self._training_docs: Optional[list] = None
|
226 |
+
self._fewshot_docs: Optional[list] = None
|
227 |
+
self._instances: Optional[List[Instance]] = None
|
228 |
+
|
229 |
+
self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()
|
230 |
+
|
231 |
+
self._filters = [build_filter_ensemble("none", [["take_first", None]])]
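# default filter pipeline: a single "take_first" filter that keeps only the first model response per instance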
|
232 |
+
|
233 |
+
def download(
|
234 |
+
self,
|
235 |
+
data_dir: Optional[str] = None,
|
236 |
+
cache_dir: Optional[str] = None,
|
237 |
+
download_mode=None,
|
238 |
+
) -> None:
|
239 |
+
"""Downloads and returns the task dataset.
|
240 |
+
Override this method to download the dataset from a custom API.
|
241 |
+
|
242 |
+
:param data_dir: str
|
243 |
+
Stores the path to a local folder containing the `Task`'s data files.
|
244 |
+
Use this to specify the path to manually downloaded data (usually when
|
245 |
+
the dataset is not publicly accessible).
|
246 |
+
:param cache_dir: str
|
247 |
+
The directory to read/write the `Task` dataset. This follows the
|
248 |
+
HuggingFace `datasets` API with the default cache directory located at:
|
249 |
+
`~/.cache/huggingface/datasets`
|
250 |
+
NOTE: You can change the cache location globally for a given process
|
251 |
+
by setting the shell environment variable, `HF_DATASETS_CACHE`,
|
252 |
+
to another directory:
|
253 |
+
`export HF_DATASETS_CACHE="/path/to/another/directory"`
|
254 |
+
:param download_mode: datasets.DownloadMode
|
255 |
+
How to treat pre-existing `Task` downloads and data.
|
256 |
+
- `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
|
257 |
+
Reuse download and reuse dataset.
|
258 |
+
- `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
|
259 |
+
Reuse download with fresh dataset.
|
260 |
+
- `datasets.DownloadMode.FORCE_REDOWNLOAD`
|
261 |
+
Fresh download and fresh dataset.
|
262 |
+
"""
|
263 |
+
self.dataset = datasets.load_dataset(
|
264 |
+
path=self.DATASET_PATH,
|
265 |
+
name=self.DATASET_NAME,
|
266 |
+
data_dir=data_dir,
|
267 |
+
cache_dir=cache_dir,
|
268 |
+
download_mode=download_mode,
|
269 |
+
)
|
270 |
+
|
271 |
+
@property
|
272 |
+
def config(self) -> TaskConfig:
|
273 |
+
"""Returns the TaskConfig associated with this class."""
|
274 |
+
return self._config
|
275 |
+
|
276 |
+
@abc.abstractmethod
|
277 |
+
def has_training_docs(self):
|
278 |
+
"""Whether the task has a training set"""
|
279 |
+
pass
|
280 |
+
|
281 |
+
@abc.abstractmethod
|
282 |
+
def has_validation_docs(self):
|
283 |
+
"""Whether the task has a validation set"""
|
284 |
+
pass
|
285 |
+
|
286 |
+
@abc.abstractmethod
|
287 |
+
def has_test_docs(self):
|
288 |
+
"""Whether the task has a test set"""
|
289 |
+
pass
|
290 |
+
|
291 |
+
def training_docs(self) -> Iterable:
|
292 |
+
"""
|
293 |
+
:return: Iterable[obj]
|
294 |
+
An iterable of any object that doc_to_text can handle
|
295 |
+
"""
|
296 |
+
return []
|
297 |
+
|
298 |
+
def validation_docs(self) -> Iterable:
|
299 |
+
"""
|
300 |
+
:return: Iterable[obj]
|
301 |
+
An iterable of any object that doc_to_text can handle
|
302 |
+
"""
|
303 |
+
return []
|
304 |
+
|
305 |
+
def test_docs(self) -> Iterable:
|
306 |
+
"""
|
307 |
+
:return: Iterable[obj]
|
308 |
+
An iterable of any object that doc_to_text can handle
|
309 |
+
"""
|
310 |
+
return []
|
311 |
+
|
312 |
+
def fewshot_docs(self) -> Iterable:
|
313 |
+
"""
|
314 |
+
:return: Iterable[obj]
|
315 |
+
An iterable of any object that doc_to_text can handle
|
316 |
+
"""
|
317 |
+
if self.has_training_docs():
|
318 |
+
return self.training_docs()
|
319 |
+
elif self.has_validation_docs():
|
320 |
+
return self.validation_docs()
|
321 |
+
else:
|
322 |
+
eval_logger.warning(
|
323 |
+
f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
|
324 |
+
", using test_docs as fewshot_docs but this is not recommended."
|
325 |
+
)
|
326 |
+
return self.test_docs()
|
327 |
+
|
328 |
+
def _process_doc(self, doc: dict) -> dict:
|
329 |
+
"""
|
330 |
+
Override this to process (detokenize, strip, replace, etc.) individual
|
331 |
+
documents. This can be used in a map over documents of a data split.
|
332 |
+
E.g. `map(self._process_doc, self.dataset["validation"])`
|
333 |
+
|
334 |
+
:return: dict
|
335 |
+
The processed version of the specified `doc`.
|
336 |
+
"""
|
337 |
+
return doc
|
338 |
+
|
339 |
+
@property
|
340 |
+
def instances(self) -> List[Instance]:
|
341 |
+
"""After calling `task.build_all_requests()`, tasks
|
342 |
+
maintain a list of the dataset instances which will be evaluated.
|
343 |
+
"""
|
344 |
+
return self._instances
|
345 |
+
|
346 |
+
def fewshot_examples(self, k, rnd):
|
347 |
+
if self._training_docs is None:
|
348 |
+
self._training_docs = list(self.training_docs())
|
349 |
+
|
350 |
+
return rnd.sample(self._training_docs, k)
|
351 |
+
|
352 |
+
def doc_to_decontamination_query(self, doc):
|
353 |
+
raise NotImplementedError(
|
354 |
+
"Override doc_to_decontamination_query with document specific decontamination query."
|
355 |
+
)
|
356 |
+
|
357 |
+
@abc.abstractmethod
|
358 |
+
def doc_to_text(self, doc):
|
359 |
+
pass
|
360 |
+
|
361 |
+
@abc.abstractmethod
|
362 |
+
def doc_to_target(self, doc):
|
363 |
+
pass
|
364 |
+
|
365 |
+
def build_all_requests(
|
366 |
+
self,
|
367 |
+
*,
|
368 |
+
limit=None,
|
369 |
+
rank=None,
|
370 |
+
world_size=None,
|
371 |
+
cache_requests=False,
|
372 |
+
rewrite_requests_cache=False,
|
373 |
+
) -> None:
|
374 |
+
"""Build a set of Instances for a task, and store them in task.instances"""
|
375 |
+
|
376 |
+
# used with caching
|
377 |
+
og_limit = limit
|
378 |
+
|
379 |
+
cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
|
380 |
+
|
381 |
+
cached_instances = load_from_cache(file_name=cache_key)
|
382 |
+
|
383 |
+
if cache_requests and cached_instances and not rewrite_requests_cache:
|
384 |
+
cached_instances = cached_instances[:limit]
|
385 |
+
|
386 |
+
flattened_instances = [
|
387 |
+
instance
|
388 |
+
for instance_group in cached_instances
|
389 |
+
for instance in instance_group
|
390 |
+
]
|
391 |
+
|
392 |
+
self._instances = flattened_instances
|
393 |
+
return
|
394 |
+
|
395 |
+
eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")
|
396 |
+
|
397 |
+
instances = []
|
398 |
+
|
399 |
+
# process all documents when caching is specified for simplicity
|
400 |
+
if (
|
401 |
+
cache_requests
|
402 |
+
and (not cached_instances or rewrite_requests_cache)
|
403 |
+
and limit is not None
|
404 |
+
):
|
405 |
+
limit = None
|
406 |
+
|
407 |
+
doc_id_docs = list(
|
408 |
+
self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
|
409 |
+
)
|
410 |
+
|
411 |
+
num_docs = len(doc_id_docs)
|
412 |
+
|
413 |
+
for doc_id, doc in tqdm(
|
414 |
+
doc_id_docs,
|
415 |
+
total=num_docs,
|
416 |
+
):
|
417 |
+
# sample fewshot context #TODO: need to offset doc_id by rank now!
|
418 |
+
fewshot_ctx = self.fewshot_context(
|
419 |
+
doc,
|
420 |
+
0 if self.config.num_fewshot is None else self.config.num_fewshot,
|
421 |
+
)
|
422 |
+
|
423 |
+
# TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
|
424 |
+
inst = self.construct_requests(
|
425 |
+
doc=doc,
|
426 |
+
ctx=fewshot_ctx,
|
427 |
+
metadata=(self.config["task"], doc_id, self.config.repeats),
|
428 |
+
)
|
429 |
+
|
430 |
+
if not isinstance(inst, list):
|
431 |
+
inst = [inst]
|
432 |
+
|
433 |
+
instances.append(inst)
|
434 |
+
|
435 |
+
# now flatten, this is to allow slicing to work with pickles
|
436 |
+
|
437 |
+
sliced_instances = instances[:og_limit]
|
438 |
+
|
439 |
+
flattened_instances = [
|
440 |
+
instance
|
441 |
+
for instance_group in sliced_instances
|
442 |
+
for instance in instance_group
|
443 |
+
]
|
444 |
+
|
445 |
+
self._instances = flattened_instances
|
446 |
+
|
447 |
+
if len(self._instances) == 0:
|
448 |
+
raise ValueError("task.build_requests() did not find any docs!")
|
449 |
+
|
450 |
+
if cache_requests and (not cached_instances or rewrite_requests_cache):
|
451 |
+
save_to_cache(file_name=cache_key, obj=instances)
|
452 |
+
|
453 |
+
@abc.abstractmethod
|
454 |
+
def construct_requests(self, doc, ctx, **kwargs):
|
455 |
+
"""Uses RequestFactory to construct Requests and returns an iterable of
|
456 |
+
Requests which will be sent to the LM.
|
457 |
+
|
458 |
+
:param doc:
|
459 |
+
The document as returned from training_docs, validation_docs, or test_docs.
|
460 |
+
:param ctx: str
|
461 |
+
The context string, generated by fewshot_context. This includes the natural
|
462 |
+
language description, as well as the few shot examples, and the question
|
463 |
+
part of the document for `doc`.
|
464 |
+
:param doc_idx: int
|
465 |
+
The index of a document within `self.test_docs()` or `self.validation_docs()`,
|
466 |
+
whichever is the main split used.
|
467 |
+
:param repeats: int
|
468 |
+
TODO: update this docstring
|
469 |
+
The number of times each instance in a dataset is inferred on. Defaults to 1,
|
470 |
+
can be increased for techniques like majority voting.
|
471 |
+
"""
|
472 |
+
pass
|
473 |
+
|
474 |
+
@abc.abstractmethod
|
475 |
+
def process_results(self, doc, results):
|
476 |
+
"""Take a single document and the LM results and evaluates, returning a
|
477 |
+
dict where keys are the names of submetrics and values are the values of
|
478 |
+
the metric for that one document
|
479 |
+
|
480 |
+
:param doc:
|
481 |
+
The document as returned from training_docs, validation_docs, or test_docs.
|
482 |
+
:param results:
|
483 |
+
The results of the requests created in construct_requests.
|
484 |
+
"""
|
485 |
+
pass
|
486 |
+
|
487 |
+
@abc.abstractmethod
|
488 |
+
def aggregation(self):
|
489 |
+
"""
|
490 |
+
:returns: {str: [metric_score] -> float}
|
491 |
+
A dictionary where keys are the names of submetrics and values are
|
492 |
+
functions that aggregate a list of metric scores
|
493 |
+
"""
|
494 |
+
pass
|
495 |
+
|
496 |
+
@abc.abstractmethod
|
497 |
+
def higher_is_better(self):
|
498 |
+
"""
|
499 |
+
:returns: {str: bool}
|
500 |
+
A dictionary where keys are the names of submetrics and values are
|
501 |
+
whether a higher value of the submetric is better
|
502 |
+
"""
|
503 |
+
pass
|
504 |
+
|
505 |
+
def get_config(self, key: str) -> Any:
|
506 |
+
return getattr(self._config, key, None)
|
507 |
+
|
508 |
+
@classmethod
|
509 |
+
def count_bytes(cls, doc):
|
510 |
+
"""Used for byte-level perplexity metrics in rolling loglikelihood"""
|
511 |
+
return len(doc.encode("utf-8"))
|
512 |
+
|
513 |
+
@classmethod
|
514 |
+
def count_words(cls, doc):
|
515 |
+
"""Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
|
516 |
+
return len(re.split(r"\s+", doc))
|
517 |
+
|
518 |
+
@utils.positional_deprecated
|
519 |
+
def fewshot_context(
|
520 |
+
self,
|
521 |
+
doc,
|
522 |
+
num_fewshot,
|
523 |
+
rnd=random.Random(1234),
|
524 |
+
description=None,
|
525 |
+
):
|
526 |
+
"""Returns a fewshot context string that is made up of a prepended description
|
527 |
+
(if provided), the `num_fewshot` number of examples, and an appended prompt example.
|
528 |
+
|
529 |
+
:param doc: str
|
530 |
+
The document as returned from training_docs, validation_docs, or test_docs.
|
531 |
+
:param num_fewshot: int
|
532 |
+
The number of fewshot examples to provide in the returned context string.
|
533 |
+
:param rnd: random.Random
|
534 |
+
The pseudo-random number generator used to randomly sample examples.
|
535 |
+
WARNING: This is effectively a required arg; although a default generator is provided, passing `None` raises a ValueError.
|
536 |
+
:param description: str
|
537 |
+
The task's description that will be prepended to the fewshot examples.
|
538 |
+
:returns: str
|
539 |
+
The fewshot context.
|
540 |
+
"""
|
541 |
+
if rnd is None:
|
542 |
+
raise ValueError(
|
543 |
+
"A `random.Random` generator argument must be provided to `rnd`"
|
544 |
+
)
|
545 |
+
|
546 |
+
description = description if description else ""
|
547 |
+
|
548 |
+
if num_fewshot == 0:
|
549 |
+
labeled_examples = ""
|
550 |
+
else:
|
551 |
+
# for sets with no training docs, draw from other set *but ensure no overlap with current doc*
|
552 |
+
if self.has_training_docs():
|
553 |
+
fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
|
554 |
+
else:
|
555 |
+
if self._fewshot_docs is None:
|
556 |
+
self._fewshot_docs = list(
|
557 |
+
self.validation_docs()
|
558 |
+
if self.has_validation_docs()
|
559 |
+
else self.test_docs()
|
560 |
+
)
|
561 |
+
|
562 |
+
fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)
|
563 |
+
|
564 |
+
# get rid of the doc that's the one we're evaluating, if it's in the fewshot
|
565 |
+
fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
|
566 |
+
|
567 |
+
labeled_examples = (
|
568 |
+
"\n\n".join(
|
569 |
+
[
|
570 |
+
self.doc_to_text(doc) + self.doc_to_target(doc)
|
571 |
+
for doc in fewshotex
|
572 |
+
]
|
573 |
+
)
|
574 |
+
+ "\n\n"
|
575 |
+
)
|
576 |
+
|
577 |
+
example = self.doc_to_text(doc)
|
578 |
+
return description + labeled_examples + example
|
579 |
+
|
580 |
+
def apply_filters(self) -> Optional[List[Instance]]:
|
581 |
+
"""Iterates over FilterEnsembles and applies them to instances"""
|
582 |
+
if hasattr(self, "_filters"):
|
583 |
+
for f in self._filters:
|
584 |
+
f.apply(self._instances)
|
585 |
+
else:
|
586 |
+
eval_logger.warning("No filter defined, passing through instances")
|
587 |
+
return self._instances
|
588 |
+
|
589 |
+
def dump_config(self) -> dict:
|
590 |
+
"""Returns the config as a dictionary."""
|
591 |
+
# TODO: this should only return the overrides applied to a non-YAML task's configuration.
|
592 |
+
# (num_fewshot)
|
593 |
+
return self.config.to_dict()
|
594 |
+
|
595 |
+
def set_config(self, key: str, value: Any, update: bool = False) -> None:
|
596 |
+
"""Set or update the configuration for a given key."""
|
597 |
+
if key is None:
|
598 |
+
raise ValueError("Key must be provided.")
|
599 |
+
|
600 |
+
if update:
|
601 |
+
current_value = getattr(self._config, key, {})
|
602 |
+
if not isinstance(current_value, dict):
|
603 |
+
raise TypeError(
|
604 |
+
f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
|
605 |
+
)
|
606 |
+
current_value.update(value)
|
607 |
+
else:
|
608 |
+
setattr(self._config, key, value)
|
609 |
+
|
610 |
+
def override_metric(self, metric_name: str) -> None:
|
611 |
+
"""
|
612 |
+
Override the default metrics used for evaluation with custom metrics.
|
613 |
+
|
614 |
+
Parameters:
|
615 |
+
- metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
|
616 |
+
"""
|
617 |
+
(
|
618 |
+
self._metric_fn_list,
|
619 |
+
self._aggregation_list,
|
620 |
+
self._metric_fn_kwargs,
|
621 |
+
self._higher_is_better,
|
622 |
+
) = ({}, {}, {}, {})
|
623 |
+
self._metric_fn_list[metric_name] = get_metric(metric_name)
|
624 |
+
self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
|
625 |
+
self._higher_is_better[metric_name] = is_higher_better(metric_name)
|
626 |
+
self._metric_fn_kwargs[metric_name] = {}
|
627 |
+
if not isinstance(self, ConfigurableTask):
|
628 |
+
self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
|
629 |
+
self.aggregation = lambda: {
|
630 |
+
metric_name: get_metric_aggregation(metric_name)
|
631 |
+
}
|
632 |
+
setattr(self._config, "metric_list", [{"metric": metric_name}])
|
633 |
+
setattr(self._config, "process_results", None)
|
634 |
+
|
635 |
+
@property
|
636 |
+
def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
|
637 |
+
if self.has_test_docs():
|
638 |
+
return self.test_docs()
|
639 |
+
elif self.has_validation_docs():
|
640 |
+
return self.validation_docs()
|
641 |
+
else:
|
642 |
+
raise ValueError(
|
643 |
+
f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
|
644 |
+
)
|
645 |
+
|
646 |
+
def doc_iterator(
|
647 |
+
self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
|
648 |
+
) -> Iterator[Tuple[int, Any]]:
|
649 |
+
limit = int(limit) if limit else None
|
650 |
+
doc_iterator = utils.create_iterator(
|
651 |
+
enumerate(self.eval_docs),
|
652 |
+
rank=int(rank),
|
653 |
+
limit=limit,
|
654 |
+
world_size=int(world_size),
|
655 |
+
)
|
656 |
+
return doc_iterator
|
657 |
+
|
658 |
+
|
659 |
+
class ConfigurableTask(Task):
|
660 |
+
VERSION = "Yaml"
|
661 |
+
OUTPUT_TYPE = None
|
662 |
+
CONFIG = None
|
663 |
+
|
664 |
+
def __init__(
|
665 |
+
self,
|
666 |
+
data_dir=None,
|
667 |
+
cache_dir=None,
|
668 |
+
download_mode=None,
|
669 |
+
config: Optional[dict] = None,
|
670 |
+
) -> None: # TODO no super() call here
|
671 |
+
# Get pre-configured attributes
|
672 |
+
self._config = self.CONFIG
|
673 |
+
|
674 |
+
# Use new configurations if there was no preconfiguration
|
675 |
+
if self.config is None:
|
676 |
+
self._config = TaskConfig(**config)
|
677 |
+
# Overwrite configs
|
678 |
+
else:
|
679 |
+
if config is not None:
|
680 |
+
self._config.__dict__.update(config)
|
681 |
+
|
682 |
+
if self.config is None:
|
683 |
+
raise ValueError(
|
684 |
+
"Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
|
685 |
+
)
|
686 |
+
|
687 |
+
if isinstance(self.config.metadata, dict):
|
688 |
+
if "version" in self.config.metadata:
|
689 |
+
self.VERSION = self.config.metadata["version"]
|
690 |
+
|
691 |
+
if self.config.output_type is not None:
|
692 |
+
if self.config.output_type not in ALL_OUTPUT_TYPES:
|
693 |
+
raise ValueError(
|
694 |
+
f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
|
695 |
+
)
|
696 |
+
self.OUTPUT_TYPE = self.config.output_type
|
697 |
+
|
698 |
+
if self.config.dataset_path is not None:
|
699 |
+
self.DATASET_PATH = self.config.dataset_path
|
700 |
+
|
701 |
+
if self.config.dataset_name is not None:
|
702 |
+
self.DATASET_NAME = self.config.dataset_name
|
703 |
+
|
704 |
+
self._metric_fn_list = {}
|
705 |
+
self._metric_fn_kwargs = {}
|
706 |
+
self._aggregation_list = {}
|
707 |
+
self._higher_is_better = {}
|
708 |
+
|
709 |
+
if self.config.metric_list is None:
|
710 |
+
# TODO: handle this in TaskConfig.__post_init__ ?
|
711 |
+
_metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
|
712 |
+
|
713 |
+
for metric_name in _metric_list:
|
714 |
+
self._metric_fn_list[metric_name] = get_metric(metric_name)
|
715 |
+
self._metric_fn_kwargs[metric_name] = {}
|
716 |
+
self._aggregation_list[metric_name] = get_metric_aggregation(
|
717 |
+
metric_name
|
718 |
+
)
|
719 |
+
self._higher_is_better[metric_name] = is_higher_better(metric_name)
|
720 |
+
else:
|
721 |
+
for metric_config in self.config.metric_list:
|
722 |
+
if "metric" not in metric_config:
|
723 |
+
raise ValueError(
|
724 |
+
"'metric' key not provided for an entry in 'metric_list', must be specified!"
|
725 |
+
)
|
726 |
+
metric_name = metric_config["metric"]
|
727 |
+
kwargs = {
|
728 |
+
key: metric_config[key]
|
729 |
+
for key in metric_config
|
730 |
+
if key
|
731 |
+
not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
|
732 |
+
}
|
733 |
+
hf_evaluate_metric = (
|
734 |
+
"hf_evaluate" in metric_config
|
735 |
+
and metric_config["hf_evaluate"] is True
|
736 |
+
)
|
737 |
+
|
738 |
+
if self.config.process_results is not None:
|
739 |
+
self._metric_fn_list[metric_name] = None
|
740 |
+
self._metric_fn_kwargs[metric_name] = {}
|
741 |
+
elif callable(metric_name):
|
742 |
+
metric_fn = metric_name.__call__
|
743 |
+
metric_name = metric_name.__name__
|
744 |
+
self._metric_fn_list[metric_name] = metric_fn
|
745 |
+
self._metric_fn_kwargs[metric_name] = kwargs
|
746 |
+
else:
|
747 |
+
self._metric_fn_list[metric_name] = get_metric(
|
748 |
+
metric_name, hf_evaluate_metric
|
749 |
+
)
|
750 |
+
self._metric_fn_kwargs[metric_name] = kwargs
|
751 |
+
|
752 |
+
if "aggregation" in metric_config:
|
753 |
+
agg_name = metric_config["aggregation"]
|
754 |
+
if isinstance(agg_name, str):
|
755 |
+
self._aggregation_list[metric_name] = get_aggregation(agg_name)
|
756 |
+
elif callable(agg_name): # noqa: E721
|
757 |
+
self._aggregation_list[metric_name] = metric_config[
|
758 |
+
"aggregation"
|
759 |
+
]
|
760 |
+
else:
|
761 |
+
INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
|
762 |
+
metric_agg = get_metric_aggregation(metric_name)
|
763 |
+
eval_logger.warning(
|
764 |
+
f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
|
765 |
+
f"using default "
|
766 |
+
f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
|
767 |
+
)
|
768 |
+
self._aggregation_list[metric_name] = metric_agg
|
769 |
+
|
770 |
+
if "higher_is_better" in metric_config:
|
771 |
+
self._higher_is_better[metric_name] = metric_config[
|
772 |
+
"higher_is_better"
|
773 |
+
]
|
774 |
+
else:
|
775 |
+
eval_logger.warning(
|
776 |
+
f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
|
777 |
+
f"using default "
|
778 |
+
f"higher_is_better={is_higher_better(metric_name)}"
|
779 |
+
)
|
780 |
+
self._higher_is_better[metric_name] = is_higher_better(metric_name)
|
781 |
+
|
782 |
+
self.download(self.config.dataset_kwargs)
|
783 |
+
self._training_docs = None
|
784 |
+
self._fewshot_docs = None
|
785 |
+
|
786 |
+
if self.config.filter_list is not None:
|
787 |
+
self._filters = []
|
788 |
+
for filter_config in self.config.filter_list:
|
789 |
+
filter_name = filter_config["name"]
|
790 |
+
filter_functions = filter_config["filter"]
|
791 |
+
components = []
|
792 |
+
for function in filter_functions:
|
793 |
+
kwargs = {
|
794 |
+
key: function[key] for key in function if key != "function"
|
795 |
+
}
|
796 |
+
components.append([function["function"], kwargs])
|
797 |
+
filter_pipeline = build_filter_ensemble(filter_name, components)
|
798 |
+
self._filters.append(filter_pipeline)
|
799 |
+
else:
|
800 |
+
self._filters = [build_filter_ensemble("none", [["take_first", None]])]
|
801 |
+
|
802 |
+
if self.config.use_prompt is not None:
|
803 |
+
eval_logger.info(f"loading prompt {self.config.use_prompt}")
|
804 |
+
self.prompt = get_prompt(
|
805 |
+
self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
|
806 |
+
)
|
807 |
+
else:
|
808 |
+
self.prompt = None
|
809 |
+
|
810 |
+
if self.fewshot_docs() is not None:
|
811 |
+
self.sampler = samplers.get_sampler(
|
812 |
+
self.config.fewshot_config.get("sampler", "default")
|
813 |
+
if self.config.fewshot_config
|
814 |
+
else "default"
|
815 |
+
)(list(self.fewshot_docs()), self, rnd=random.Random(1234))
|
816 |
+
|
817 |
+
self.task_docs = self.eval_docs
|
818 |
+
|
819 |
+
# Test One Doc
|
820 |
+
self.features = list(self.task_docs.features.keys())
|
821 |
+
self.multiple_input = 0
|
822 |
+
self.multiple_target = 0
|
823 |
+
test_doc = self.task_docs[0]
|
824 |
+
test_text = self.doc_to_text(test_doc)
|
825 |
+
test_target = self.doc_to_target(test_doc)
|
826 |
+
|
827 |
+
if self.config.doc_to_choice is not None:
|
828 |
+
test_choice = self.doc_to_choice(test_doc)
|
829 |
+
if not isinstance(test_choice, list):
|
830 |
+
eval_logger.error("doc_to_choice must return list")
|
831 |
+
else:
|
832 |
+
num_choice = len(test_choice)
|
833 |
+
|
834 |
+
if isinstance(test_text, int):
|
835 |
+
self.multiple_input = num_choice
|
836 |
+
else:
|
837 |
+
test_choice = None
|
838 |
+
|
839 |
+
if isinstance(test_target, list):
|
840 |
+
self.multiple_target = len(test_target)
|
841 |
+
else:
|
842 |
+
if (isinstance(test_target, int)) and (test_choice is not None):
|
843 |
+
test_target = test_choice[test_target]
|
844 |
+
else:
|
845 |
+
test_target = str(test_target)
|
846 |
+
|
847 |
+
if test_choice is not None:
|
848 |
+
check_choices = test_choice
|
849 |
+
else:
|
850 |
+
check_choices = [test_target]
|
851 |
+
if self.config.doc_to_choice is not None:
|
852 |
+
for choice in check_choices:
|
853 |
+
choice_has_whitespace = True if choice[0].isspace() else False
|
854 |
+
delimiter_has_whitespace = (
|
855 |
+
True
|
856 |
+
if self.config.target_delimiter.rstrip()
|
857 |
+
!= self.config.target_delimiter
|
858 |
+
else False
|
859 |
+
)
|
860 |
+
|
861 |
+
if delimiter_has_whitespace and choice_has_whitespace:
|
862 |
+
eval_logger.debug(
|
863 |
+
f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
|
864 |
+
)
|
865 |
+
elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
|
866 |
+
eval_logger.debug(
|
867 |
+
f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
|
868 |
+
)
|
869 |
+
|
870 |
+
def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
|
871 |
+
self.dataset = datasets.load_dataset(
|
872 |
+
path=self.DATASET_PATH,
|
873 |
+
name=self.DATASET_NAME,
|
874 |
+
**dataset_kwargs if dataset_kwargs is not None else {},
|
875 |
+
)
|
876 |
+
|
877 |
+
def has_training_docs(self) -> bool:
|
878 |
+
if self.config.training_split is not None:
|
879 |
+
return True
|
880 |
+
else:
|
881 |
+
return False
|
882 |
+
|
883 |
+
def has_validation_docs(self) -> bool:
|
884 |
+
if self.config.validation_split is not None:
|
885 |
+
return True
|
886 |
+
else:
|
887 |
+
return False
|
888 |
+
|
889 |
+
def has_test_docs(self) -> bool:
|
890 |
+
if self.config.test_split is not None:
|
891 |
+
return True
|
892 |
+
else:
|
893 |
+
return False
|
894 |
+
|
895 |
+
def training_docs(self) -> datasets.Dataset:
|
896 |
+
if self.has_training_docs():
|
897 |
+
if self.config.process_docs is not None:
|
898 |
+
return self.config.process_docs(
|
899 |
+
self.dataset[self.config.training_split]
|
900 |
+
)
|
901 |
+
return self.dataset[self.config.training_split]
|
902 |
+
|
903 |
+
def validation_docs(self) -> datasets.Dataset:
|
904 |
+
if self.has_validation_docs():
|
905 |
+
if self.config.process_docs is not None:
|
906 |
+
return self.config.process_docs(
|
907 |
+
self.dataset[self.config.validation_split]
|
908 |
+
)
|
909 |
+
return self.dataset[self.config.validation_split]
|
910 |
+
|
911 |
+
def test_docs(self) -> datasets.Dataset:
|
912 |
+
if self.has_test_docs():
|
913 |
+
if self.config.process_docs is not None:
|
914 |
+
return self.config.process_docs(self.dataset[self.config.test_split])
|
915 |
+
return self.dataset[self.config.test_split]
|
916 |
+
|
917 |
+
def fewshot_docs(self):
|
918 |
+
if self.config.fewshot_split is not None:
|
919 |
+
if self.config.process_docs is not None:
|
920 |
+
return self.config.process_docs(self.dataset[self.config.fewshot_split])
|
921 |
+
return self.dataset[self.config.fewshot_split]
|
922 |
+
else:
|
923 |
+
if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
|
924 |
+
eval_logger.warning(
|
925 |
+
f"Task '{self.config.task}': "
|
926 |
+
"num_fewshot > 0 but fewshot_split is None. "
|
927 |
+
"using preconfigured rule."
|
928 |
+
)
|
929 |
+
return super().fewshot_docs()
|
930 |
+
|
931 |
+
@utils.positional_deprecated
|
932 |
+
def fewshot_context(self, doc: str, num_fewshot: int) -> str:
|
933 |
+
"""Returns a fewshot context string that is made up of a prepended description
|
934 |
+
(if provided), the `num_fewshot` number of examples, and an appended prompt example.
|
935 |
+
|
936 |
+
:param doc: str
|
937 |
+
The document as returned from training_docs, validation_docs, or test_docs.
|
938 |
+
:param num_fewshot: int
|
939 |
+
The number of fewshot examples to provide in the returned context string.
|
940 |
+
:returns: str
|
941 |
+
The fewshot context.
|
942 |
+
"""
|
943 |
+
if description := self.config.description:
|
944 |
+
description = utils.apply_template(self.config.description, doc)
|
945 |
+
|
946 |
+
if num_fewshot == 0:
|
947 |
+
# always prepend the (possibly empty) task description
|
948 |
+
labeled_examples = description
|
949 |
+
else:
|
950 |
+
labeled_examples = description + self.sampler.get_context(doc, num_fewshot)
|
951 |
+
|
952 |
+
example = self.doc_to_text(doc)
|
953 |
+
if self.multiple_input:
|
954 |
+
return labeled_examples
|
955 |
+
else:
|
956 |
+
if isinstance(example, str):
|
957 |
+
return labeled_examples + example
|
958 |
+
elif isinstance(example, list):
|
959 |
+
return [labeled_examples + ex for ex in example]
|
960 |
+
elif isinstance(example, int):
|
961 |
+
if self.config.doc_to_choice is not None:
|
962 |
+
choices = self.doc_to_choice(doc)
|
963 |
+
return labeled_examples + choices[example]
|
964 |
+
else:
|
965 |
+
return labeled_examples + str(example)
|
966 |
+
|
967 |
+
def apply_filters(self):
|
968 |
+
"""Iterates over FilterEnsembles and applies them to instances"""
|
969 |
+
if hasattr(self, "_filters"):
|
970 |
+
for f in self._filters:
|
971 |
+
f.apply(self._instances)
|
972 |
+
else:
|
973 |
+
eval_logger.warning("No filter defined, passing through instances")
|
974 |
+
return self._instances
|
975 |
+
|
976 |
+
def should_decontaminate(self):
|
977 |
+
return self.config.should_decontaminate
|
978 |
+
|
979 |
+
def doc_to_decontamination_query(self, doc):
|
980 |
+
if self.config.should_decontaminate:
|
981 |
+
if self.config.doc_to_decontamination_query is None:
|
982 |
+
return self.doc_to_text(doc)
|
983 |
+
else:
|
984 |
+
doc_to_decontamination_query = self.config.doc_to_decontamination_query
|
985 |
+
if doc_to_decontamination_query in self.features:
|
986 |
+
return doc[doc_to_decontamination_query]
|
987 |
+
elif callable(doc_to_decontamination_query):
|
988 |
+
return doc_to_decontamination_query(doc)
|
989 |
+
else:
|
990 |
+
return ast.literal_eval(
|
991 |
+
utils.apply_template(
|
992 |
+
self.config.doc_to_decontamination_query, doc
|
993 |
+
)
|
994 |
+
)
|
995 |
+
|
996 |
+
def _process_doc(self, doc: dict) -> dict:
|
997 |
+
"""
|
998 |
+
Override this to process (detokenize, strip, replace, etc.) individual
|
999 |
+
documents. This can be used in a map over documents of a data split.
|
1000 |
+
E.g. `map(self._process_doc, self.dataset["validation"])`
|
1001 |
+
|
1002 |
+
:return: dict
|
1003 |
+
The processed version of the specified `doc`.
|
1004 |
+
"""
|
1005 |
+
return doc
|
1006 |
+
|
1007 |
+
def doc_to_text(self, doc):
|
1008 |
+
if self.prompt is not None:
|
1009 |
+
doc_to_text = self.prompt
|
1010 |
+
else:
|
1011 |
+
doc_to_text = self.config.doc_to_text
|
1012 |
+
|
1013 |
+
if isinstance(doc_to_text, int):
|
1014 |
+
return doc_to_text
|
1015 |
+
elif isinstance(doc_to_text, str):
|
1016 |
+
if doc_to_text in self.features:
|
1017 |
+
# if self.config.doc_to_choice is not None:
|
1018 |
+
# return self.doc_to_choice(doc)[doc[doc_to_text]]
|
1019 |
+
# else:
|
1020 |
+
return doc[doc_to_text]
|
1021 |
+
else:
|
1022 |
+
text_string = utils.apply_template(doc_to_text, doc)
|
1023 |
+
if text_string.isdigit() and self._config.doc_to_choice is not None:
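# an all-digit rendering is treated as an integer index into doc_to_choice rather than as literal prompt text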
|
1024 |
+
return ast.literal_eval(text_string)
|
1025 |
+
else:
|
1026 |
+
return text_string
|
1027 |
+
elif callable(doc_to_text):
|
1028 |
+
return doc_to_text(doc)
|
1029 |
+
# Used when applying a Promptsource template
|
1030 |
+
elif hasattr(doc_to_text, "apply"):
|
1031 |
+
applied_prompt = doc_to_text.apply(doc)
|
1032 |
+
if len(applied_prompt) == 2:
|
1033 |
+
return applied_prompt[0]
|
1034 |
+
else:
|
1035 |
+
eval_logger.warning("Applied prompt returns empty string")
|
1036 |
+
return self.config.fewshot_delimiter
|
1037 |
+
else:
|
1038 |
+
print(type(doc_to_text))
|
1039 |
+
raise TypeError
|
1040 |
+
|
1041 |
+
def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
|
1042 |
+
if self.prompt is not None:
|
1043 |
+
doc_to_target = self.prompt
|
1044 |
+
else:
|
1045 |
+
doc_to_target = self.config.doc_to_target
|
1046 |
+
|
1047 |
+
if isinstance(doc_to_target, int):
|
1048 |
+
return doc_to_target
|
1049 |
+
elif isinstance(doc_to_target, str):
|
1050 |
+
if doc_to_target in self.features:
|
1051 |
+
# if self.config.doc_to_choice is not None:
|
1052 |
+
# return self.doc_to_choice(doc)[doc[doc_to_target]]
|
1053 |
+
# else:
|
1054 |
+
return doc[doc_to_target]
|
1055 |
+
else:
|
1056 |
+
target_string = utils.apply_template(doc_to_target, doc)
|
1057 |
+
if target_string.isdigit() and self._config.doc_to_choice is not None:
|
1058 |
+
return ast.literal_eval(target_string)
|
1059 |
+
elif (
|
1060 |
+
len(target_string) >= 2
|
1061 |
+
and (target_string[0] == "[")
|
1062 |
+
and (target_string[-1] == "]")
|
1063 |
+
):
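# a rendered target that looks like a Python list literal (e.g. "['yes', 'no']") is parsed into a list when possible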
|
1064 |
+
try:
|
1065 |
+
return ast.literal_eval(target_string)
|
1066 |
+
except (SyntaxError, ValueError):
|
1067 |
+
return target_string
|
1068 |
+
else:
|
1069 |
+
return target_string
|
1070 |
+
elif isinstance(doc_to_target, list):
|
1071 |
+
return doc_to_target
|
1072 |
+
elif callable(doc_to_target):
|
1073 |
+
return doc_to_target(doc)
|
1074 |
+
# Used when applying a Promptsource template
|
1075 |
+
elif hasattr(doc_to_target, "apply"):
|
1076 |
+
applied_prompt = doc_to_target.apply(doc)
|
1077 |
+
if len(applied_prompt) == 2:
|
1078 |
+
return applied_prompt[1]
|
1079 |
+
else:
|
1080 |
+
eval_logger.warning("Applied prompt returns empty string")
|
1081 |
+
return self.config.fewshot_delimiter
|
1082 |
+
else:
|
1083 |
+
raise TypeError
|
1084 |
+
|
1085 |
+
def doc_to_choice(self, doc: Any) -> List[str]:
|
1086 |
+
if self.prompt is not None:
|
1087 |
+
doc_to_choice = self.prompt
|
1088 |
+
elif self.config.doc_to_choice is None:
|
1089 |
+
eval_logger.error("doc_to_choice was called but not set in config")
|
1090 |
+
else:
|
1091 |
+
doc_to_choice = self.config.doc_to_choice
|
1092 |
+
|
1093 |
+
if isinstance(doc_to_choice, str):
|
1094 |
+
if doc_to_choice in self.features:
|
1095 |
+
return doc[doc_to_choice]
|
1096 |
+
else:
|
1097 |
+
return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
|
1098 |
+
elif isinstance(doc_to_choice, list):
|
1099 |
+
return doc_to_choice
|
1100 |
+
elif isinstance(doc_to_choice, dict):
|
1101 |
+
return list(doc_to_choice.values())
|
1102 |
+
elif callable(doc_to_choice):
|
1103 |
+
return doc_to_choice(doc)
|
1104 |
+
elif hasattr(doc_to_choice, "get_answer_choices_list"):
|
1105 |
+
return doc_to_choice.get_answer_choices_list(doc)
|
1106 |
+
else:
|
1107 |
+
raise TypeError
|
1108 |
+
|
1109 |
+
def construct_requests(
|
1110 |
+
self, doc: dict, ctx: str, **kwargs
|
1111 |
+
) -> Union[List[Instance], Instance]:
|
1112 |
+
if self.OUTPUT_TYPE == "loglikelihood":
|
1113 |
+
arguments = (ctx, self.doc_to_target(doc))
|
1114 |
+
elif self.OUTPUT_TYPE == "loglikelihood_rolling":
|
1115 |
+
arguments = (self.doc_to_target(doc),)
|
1116 |
+
elif self.OUTPUT_TYPE == "multiple_choice":
|
1117 |
+
choices = self.doc_to_choice(doc)
|
1118 |
+
target_delimiter = self.config.target_delimiter
|
1119 |
+
if self.multiple_input:
|
1120 |
+
# If there are multiple inputs, choices are placed in the ctx
|
1121 |
+
cont = self.doc_to_target(doc)
|
1122 |
+
arguments = [
|
1123 |
+
(ctx + choice, f"{target_delimiter}{cont}") for choice in choices
|
1124 |
+
]
|
1125 |
+
else:
|
1126 |
+
# Otherwise they are placed in the continuation
|
1127 |
+
arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
|
1128 |
+
|
1129 |
+
request_list = [
|
1130 |
+
Instance(
|
1131 |
+
request_type="loglikelihood",
|
1132 |
+
doc=doc,
|
1133 |
+
arguments=arg,
|
1134 |
+
idx=i,
|
1135 |
+
**kwargs,
|
1136 |
+
)
|
1137 |
+
for i, arg in enumerate(arguments)
|
1138 |
+
]
|
1139 |
+
# TODO: we should raise a warning telling users this will at most ~2x runtime.
|
1140 |
+
if "acc_mutual_info" in self._metric_fn_list.keys():
|
1141 |
+
# if we are calculating multiple choice accuracy
|
1142 |
+
# using mutual information instead of raw loglikelihood as metric, need unconditional lls.
|
1143 |
+
|
1144 |
+
# here mutual info refers to calculating
|
1145 |
+
# log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
|
1146 |
+
# in other words normalizing by subtracting the unconditional logprob of each choice.
|
1147 |
+
request_list.extend(
|
1148 |
+
[
|
1149 |
+
Instance(
|
1150 |
+
request_type="loglikelihood",
|
1151 |
+
doc=doc,
|
1152 |
+
arguments=("", "{}".format(choice)),
|
1153 |
+
idx=i,
|
1154 |
+
**kwargs,
|
1155 |
+
)
|
1156 |
+
for i, choice in enumerate(choices)
|
1157 |
+
]
|
1158 |
+
)
|
1159 |
+
return request_list
|
1160 |
+
|
1161 |
+
elif self.OUTPUT_TYPE == "generate_until":
|
1162 |
+
arguments = (ctx, deepcopy(self.config.generation_kwargs))
|
1163 |
+
|
1164 |
+
return Instance(
|
1165 |
+
request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
|
1166 |
+
)
|
1167 |
+
|
1168 |
+
def process_results(self, doc, results):
|
1169 |
+
if callable(self.config.process_results):
|
1170 |
+
return self.config.process_results(doc, results)
|
1171 |
+
|
1172 |
+
result_dict = {}
|
1173 |
+
use_metric = list(self._metric_fn_list.keys())
|
1174 |
+
if self.OUTPUT_TYPE == "loglikelihood":
|
1175 |
+
results = results[0]
|
1176 |
+
ll, is_greedy = results
|
1177 |
+
return {
|
1178 |
+
**({"perplexity": ll} if "perplexity" in use_metric else {}),
|
1179 |
+
**({"acc": int(is_greedy)} if "acc" in use_metric else {}),
|
1180 |
+
}
|
1181 |
+
elif self.OUTPUT_TYPE == "loglikelihood_rolling":
|
1182 |
+
(loglikelihood,) = results
|
1183 |
+
_words = self.count_words(self.doc_to_target(doc))
|
1184 |
+
_bytes = self.count_bytes(self.doc_to_target(doc))
|
1185 |
+
return {
|
1186 |
+
**(
|
1187 |
+
{"word_perplexity": (loglikelihood, _words)}
|
1188 |
+
if "word_perplexity" in use_metric
|
1189 |
+
else {}
|
1190 |
+
),
|
1191 |
+
**(
|
1192 |
+
{"byte_perplexity": (loglikelihood, _bytes)}
|
1193 |
+
if "byte_perplexity" in use_metric
|
1194 |
+
else {}
|
1195 |
+
),
|
1196 |
+
**(
|
1197 |
+
{"bits_per_byte": (loglikelihood, _bytes)}
|
1198 |
+
if "bits_per_byte" in use_metric
|
1199 |
+
else {}
|
1200 |
+
),
|
1201 |
+
}
|
1202 |
+
elif self.OUTPUT_TYPE == "multiple_choice":
|
1203 |
+
lls, is_greedy = zip(*results)
|
1204 |
+
|
1205 |
+
# retrieve choices in List[str] form, to compute choice lengths, etc.
|
1206 |
+
choices = self.doc_to_choice(doc)
|
1207 |
+
completion_len = np.array([float(len(i)) for i in choices])
|
1208 |
+
|
1209 |
+
if (
|
1210 |
+
2 * len(choices) == len(lls)
|
1211 |
+
and "acc_mutual_info" in self._metric_fn_list.keys()
|
1212 |
+
):
|
1213 |
+
# then we are doing mutual info.
|
1214 |
+
# this stores the "dryrun" / unconditional answer loglikelihoods
|
1215 |
+
lls_unconditional = lls[1::2]
|
1216 |
+
if len(lls_unconditional) != len(choices):
|
1217 |
+
raise ValueError
|
1218 |
+
# and this stores our "regular" conditional loglikelihoods
|
1219 |
+
lls = lls[::2]
|
1220 |
+
|
1221 |
+
pred = np.argmax(lls)
|
1222 |
+
pred_norm = np.argmax(lls / completion_len)
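# acc compares the raw-argmax prediction (pred); acc_norm uses the argmax after dividing each loglikelihood by the choice's character length (pred_norm)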
|
1223 |
+
|
1224 |
+
if self.multiple_input:
|
1225 |
+
gold = self.doc_to_text(doc)
|
1226 |
+
else:
|
1227 |
+
gold = self.doc_to_target(doc)
|
1228 |
+
|
1229 |
+
gold_index_error = False
|
1230 |
+
if isinstance(gold, list):
|
1231 |
+
gold = [i if i < len(choices) else -100 for i in gold]
|
1232 |
+
if -100 in gold:
|
1233 |
+
gold_index_error = True
|
1234 |
+
else:
|
1235 |
+
if isinstance(gold, int):
|
1236 |
+
gold = gold if gold < len(choices) else -100
|
1237 |
+
elif isinstance(gold, str):
|
1238 |
+
gold = choices.index(gold) if gold in choices else -100
|
1239 |
+
|
1240 |
+
if gold == -100:
|
1241 |
+
gold_index_error = True
|
1242 |
+
|
1243 |
+
if gold_index_error:
|
1244 |
+
eval_logger.warning(
|
1245 |
+
f"Label index was not in within range of available choices,"
|
1246 |
+
f"Sample:\n\n{doc}\n\n"
|
1247 |
+
)
|
1248 |
+
|
1249 |
+
if self.multiple_target:
|
1250 |
+
acc = 1.0 if pred in gold else 0.0
|
1251 |
+
acc_norm = 1.0 if pred_norm in gold else 0.0
|
1252 |
+
exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
|
1253 |
+
else:
|
1254 |
+
acc = 1.0 if pred == gold else 0.0
|
1255 |
+
acc_norm = 1.0 if pred_norm == gold else 0.0
|
1256 |
+
# TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
|
1257 |
+
exact_match = int(is_greedy[gold]) if gold != -100 else 0
|
1258 |
+
|
1259 |
+
prob_norm = utils.softmax(lls)
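# softmax over the conditional loglikelihoods yields the probability vector consumed by the Brier score metric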
|
1260 |
+
|
1261 |
+
# TODO use keyword arguments to the metric?
|
1262 |
+
# gold, pred, norm stuff, the original lls,
|
1263 |
+
result_dict = {
|
1264 |
+
**({"acc": acc} if "acc" in use_metric else {}),
|
1265 |
+
**({"f1": (gold, pred)} if "f1" in use_metric else {}),
|
1266 |
+
**({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
|
1267 |
+
**({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
|
1268 |
+
**({"exact_match": exact_match} if "exact_match" in use_metric else {}),
|
1269 |
+
**(
|
1270 |
+
{"brier_score": (gold, prob_norm)}
|
1271 |
+
if "brier_score" in use_metric
|
1272 |
+
else {}
|
1273 |
+
),
|
1274 |
+
}
|
1275 |
+
|
1276 |
+
if "acc_mutual_info" in use_metric:
|
1277 |
+
lls_mutual_info = [
|
1278 |
+
ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
|
1279 |
+
]
|
1280 |
+
acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
|
1281 |
+
result_dict["acc_mutual_info"] = acc_mutual_info
|
1282 |
+
|
1283 |
+
elif self.OUTPUT_TYPE == "generate_until":
|
1284 |
+
gold = self.doc_to_target(doc)
|
1285 |
+
result = results[0]
|
1286 |
+
if self.config.doc_to_choice is not None:
|
1287 |
+
# If you set doc_to_choice,
|
1288 |
+
# it assumes that doc_to_target returns a number.
|
1289 |
+
choices = self.doc_to_choice(doc)
|
1290 |
+
gold = choices[gold]
|
1291 |
+
# we expect multiple_targets to be a list.
|
1292 |
+
elif self.multiple_target:
|
1293 |
+
gold = list(gold)
|
1294 |
+
elif type(gold) != type(result):
|
1295 |
+
# cast gold to the same type as result
|
1296 |
+
gold = type(result)(gold)
|
1297 |
+
|
1298 |
+
for metric in self._metric_fn_list.keys():
|
1299 |
+
if self.multiple_target:
|
1300 |
+
# in the case where we have multiple targets,
|
1301 |
+
# return true if any are true
|
1302 |
+
# TODO: this may break for multiple_target, non zero-or-1 metrics
|
1303 |
+
scores = []
|
1304 |
+
if not isinstance(gold, list):
|
1305 |
+
# sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
|
1306 |
+
# print(gold)
|
1307 |
+
gold = [gold]
|
1308 |
+
if metric == "exact_match":
|
1309 |
+
result = [result for _ in range(len(gold))]
|
1310 |
+
scores = self._metric_fn_list[metric](
|
1311 |
+
references=gold,
|
1312 |
+
predictions=result,
|
1313 |
+
**self._metric_fn_kwargs[metric],
|
1314 |
+
)[metric]
|
1315 |
+
result_score = 1.0 if scores > 0.0 else 0.0
|
1316 |
+
else:
|
1317 |
+
for gold_option in gold:
|
1318 |
+
try:
|
1319 |
+
result_score = self._metric_fn_list[metric](
|
1320 |
+
references=[gold_option],
|
1321 |
+
predictions=[result],
|
1322 |
+
**self._metric_fn_kwargs[metric],
|
1323 |
+
)
|
1324 |
+
except (
|
1325 |
+
TypeError
|
1326 |
+
): # TODO: this is hacky and I don't want to do it
|
1327 |
+
result_score = self._metric_fn_list[metric](
|
1328 |
+
[gold_option, result]
|
1329 |
+
)
|
1330 |
+
if isinstance(result_score, dict):
|
1331 |
+
# TODO: this handles the case where HF evaluate returns a dict.
|
1332 |
+
result_score = result_score[metric]
|
1333 |
+
scores.append(result_score)
|
1334 |
+
if any(scores):
|
1335 |
+
result_score = 1.0
|
1336 |
+
else:
|
1337 |
+
result_score = 0.0
|
1338 |
+
else:
|
1339 |
+
try:
|
1340 |
+
result_score = self._metric_fn_list[metric](
|
1341 |
+
references=[gold],
|
1342 |
+
predictions=[result],
|
1343 |
+
**self._metric_fn_kwargs[metric],
|
1344 |
+
)
|
1345 |
+
except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
|
1346 |
+
result_score = self._metric_fn_list[metric]([gold, result])
|
1347 |
+
if isinstance(result_score, dict):
|
1348 |
+
# TODO: this handles the case where HF evaluate returns a dict.
|
1349 |
+
result_score = result_score[metric]
|
1350 |
+
result_dict[metric] = result_score
|
1351 |
+
else:
|
1352 |
+
raise ValueError(
|
1353 |
+
f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
|
1354 |
+
"'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'",
|
1355 |
+
)
|
1356 |
+
|
1357 |
+
return result_dict
|
1358 |
+
|
1359 |
+
def aggregation(self) -> dict:
|
1360 |
+
return self._aggregation_list
|
1361 |
+
|
1362 |
+
def higher_is_better(self) -> dict:
|
1363 |
+
return self._higher_is_better
|
1364 |
+
|
1365 |
+
def get_config(self, key: str) -> Any:
|
1366 |
+
return getattr(self._config, key, None)
|
1367 |
+
|
1368 |
+
def __repr__(self):
|
1369 |
+
return (
|
1370 |
+
f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
|
1371 |
+
f"group_name={getattr(self.config, 'group', None)},"
|
1372 |
+
f"output_type={self.OUTPUT_TYPE},"
|
1373 |
+
f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
|
1374 |
+
f"num_samples={len(self.eval_docs)})"
|
1375 |
+
)
|
1376 |
+
|
1377 |
+
|
1378 |
+
class MultipleChoiceTask(Task):
|
1379 |
+
OUTPUT_TYPE = "loglikelihood"
|
1380 |
+
|
1381 |
+
def doc_to_target(self, doc: dict) -> str:
|
1382 |
+
return " " + doc["choices"][doc["gold"]]
|
1383 |
+
|
1384 |
+
def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
|
1385 |
+
# TODO: add mutual info here?
|
1386 |
+
return [
|
1387 |
+
Instance(
|
1388 |
+
request_type="loglikelihood",
|
1389 |
+
doc=doc,
|
1390 |
+
arguments=(ctx, " {}".format(choice)),
|
1391 |
+
idx=i,
|
1392 |
+
**kwargs,
|
1393 |
+
)
|
1394 |
+
for i, choice in enumerate(doc["choices"])
|
1395 |
+
]
|
1396 |
+
|
1397 |
+
def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
|
1398 |
+
results = [
|
1399 |
+
res[0] for res in results
|
1400 |
+
] # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
|
1401 |
+
gold = doc["gold"]
|
1402 |
+
|
1403 |
+
acc = 1.0 if np.argmax(results) == gold else 0.0
|
1404 |
+
completion_len = np.array([float(len(i)) for i in doc["choices"]])
|
1405 |
+
acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0
|
1406 |
+
|
1407 |
+
return {
|
1408 |
+
"acc": acc,
|
1409 |
+
"acc_norm": acc_norm,
|
1410 |
+
}
|
1411 |
+
|
1412 |
+
def higher_is_better(self) -> dict:
|
1413 |
+
return {
|
1414 |
+
"acc": True,
|
1415 |
+
"acc_norm": True,
|
1416 |
+
}
|
1417 |
+
|
1418 |
+
def aggregation(self) -> dict:
|
1419 |
+
return {
|
1420 |
+
"acc": mean,
|
1421 |
+
"acc_norm": mean,
|
1422 |
+
}
|
1423 |
+
|
1424 |
+
|
1425 |
+
class PerplexityTask(Task):
|
1426 |
+
OUTPUT_TYPE = "loglikelihood_rolling"
|
1427 |
+
|
1428 |
+
def has_training_docs(self) -> bool:
|
1429 |
+
return False
|
1430 |
+
|
1431 |
+
def fewshot_examples(self, k: int, rnd) -> List:
|
1432 |
+
if k != 0:
|
1433 |
+
raise ValueError(
|
1434 |
+
"The number of fewshot examples must be 0 for perplexity tasks."
|
1435 |
+
)
|
1436 |
+
return []
|
1437 |
+
|
1438 |
+
def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
|
1439 |
+
if num_fewshot != 0:
|
1440 |
+
raise ValueError(
|
1441 |
+
"The number of fewshot examples must be 0 for perplexity tasks."
|
1442 |
+
)
|
1443 |
+
|
1444 |
+
return ""
|
1445 |
+
|
1446 |
+
def higher_is_better(self) -> dict:
|
1447 |
+
return {
|
1448 |
+
"word_perplexity": False,
|
1449 |
+
"byte_perplexity": False,
|
1450 |
+
"bits_per_byte": False,
|
1451 |
+
}
|
1452 |
+
|
1453 |
+
def doc_to_decontamination_query(self, doc):
|
1454 |
+
return doc
|
1455 |
+
|
1456 |
+
def doc_to_text(self, doc) -> str:
|
1457 |
+
return ""
|
1458 |
+
|
1459 |
+
def doc_to_target(self, doc):
|
1460 |
+
return doc
|
1461 |
+
|
1462 |
+
def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
|
1463 |
+
if bool(ctx):
|
1464 |
+
raise ValueError
|
1465 |
+
|
1466 |
+
return Instance(
|
1467 |
+
request_type=self.OUTPUT_TYPE,
|
1468 |
+
doc=doc,
|
1469 |
+
arguments=(self.doc_to_target(doc),),
|
1470 |
+
idx=0,
|
1471 |
+
**kwargs,
|
1472 |
+
)
|
1473 |
+
|
1474 |
+
def process_results(self, doc: dict, results: Tuple[float]) -> dict:
|
1475 |
+
(loglikelihood,) = results
|
1476 |
+
words = self.count_words(self.doc_to_target(doc))
|
1477 |
+
bytes_ = self.count_bytes(self.doc_to_target(doc))
|
1478 |
+
return {
|
1479 |
+
"word_perplexity": (loglikelihood, words),
|
1480 |
+
"byte_perplexity": (loglikelihood, bytes_),
|
1481 |
+
"bits_per_byte": (loglikelihood, bytes_),
|
1482 |
+
}
|
1483 |
+
|
1484 |
+
def aggregation(self) -> dict:
|
1485 |
+
return {
|
1486 |
+
"word_perplexity": weighted_perplexity,
|
1487 |
+
"byte_perplexity": weighted_perplexity,
|
1488 |
+
"bits_per_byte": bits_per_byte,
|
1489 |
+
}
|
1490 |
+
|
1491 |
+
@classmethod
|
1492 |
+
def count_bytes(cls, doc) -> int:
|
1493 |
+
return len(doc.encode("utf-8"))
|
1494 |
+
|
1495 |
+
@classmethod
|
1496 |
+
def count_words(cls, doc) -> int:
|
1497 |
+
"""Downstream tasks with custom word boundaries should override this!"""
|
1498 |
+
return len(re.split(r"\s+", doc))
|
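Note: the `word_perplexity`, `byte_perplexity`, and `bits_per_byte` entries returned by `PerplexityTask.process_results` are (loglikelihood, weight) pairs that only become scores at aggregation time. A minimal sketch of that reduction, assuming the weighted aggregations reduce to the standard formulas below (function names here are illustrative, not the harness's exact implementation):

```python
import math
from typing import List, Tuple

def weighted_perplexity_sketch(items: List[Tuple[float, int]]) -> float:
    # items: (total loglikelihood of a document, its word or byte count)
    total_ll = sum(ll for ll, _ in items)
    total_weight = sum(w for _, w in items)
    return math.exp(-total_ll / total_weight)

def bits_per_byte_sketch(items: List[Tuple[float, int]]) -> float:
    # same pairs, but weighted by byte counts and converted from nats to bits
    total_ll = sum(ll for ll, _ in items)
    total_bytes = sum(w for _, w in items)
    return -total_ll / (total_bytes * math.log(2))

# e.g. two documents with loglikelihoods -120.0 and -80.0 over 50 and 30 words:
print(weighted_perplexity_sketch([(-120.0, 50), (-80.0, 30)]))  # exp(200 / 80) ~= 12.18
```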
lm-evaluation/lm_eval/caching/cache.py
ADDED
@@ -0,0 +1,55 @@
1 |
+
import hashlib
|
2 |
+
import os
|
3 |
+
|
4 |
+
import dill
|
5 |
+
|
6 |
+
from lm_eval.utils import eval_logger
|
7 |
+
|
8 |
+
|
9 |
+
MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
|
10 |
+
|
11 |
+
OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH")
|
12 |
+
|
13 |
+
|
14 |
+
PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache"
|
15 |
+
|
16 |
+
# This should be sufficient for uniqueness
|
17 |
+
HASH_INPUT = "EleutherAI-lm-evaluation-harness"
|
18 |
+
|
19 |
+
HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest()
|
20 |
+
|
21 |
+
FILE_SUFFIX = f".{HASH_PREFIX}.pickle"
|
22 |
+
|
23 |
+
|
24 |
+
def load_from_cache(file_name):
|
25 |
+
try:
|
26 |
+
path = f"{PATH}/{file_name}{FILE_SUFFIX}"
|
27 |
+
|
28 |
+
with open(path, "rb") as file:
|
29 |
+
cached_task_dict = dill.loads(file.read())
|
30 |
+
return cached_task_dict
|
31 |
+
|
32 |
+
except Exception:
|
33 |
+
eval_logger.debug(f"{file_name} is not cached, generating...")
|
34 |
+
pass
|
35 |
+
|
36 |
+
|
37 |
+
def save_to_cache(file_name, obj):
|
38 |
+
if not os.path.exists(PATH):
|
39 |
+
os.mkdir(PATH)
|
40 |
+
|
41 |
+
file_path = f"{PATH}/{file_name}{FILE_SUFFIX}"
|
42 |
+
|
43 |
+
eval_logger.debug(f"Saving {file_path} to cache...")
|
44 |
+
with open(file_path, "wb") as file:
|
45 |
+
file.write(dill.dumps(obj))
|
46 |
+
|
47 |
+
|
48 |
+
# NOTE the "key" param is to allow for flexibility
|
49 |
+
def delete_cache(key: str = ""):
|
50 |
+
files = os.listdir(PATH)
|
51 |
+
|
52 |
+
for file in files:
|
53 |
+
if file.startswith(key) and file.endswith(FILE_SUFFIX):
|
54 |
+
file_path = f"{PATH}/{file}"
|
55 |
+
os.unlink(file_path)
|
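A short usage sketch for the cache helpers above. The key and cached object are stand-ins, and the cache directory falls back to `lm_eval/caching/.cache` unless `LM_HARNESS_CACHE_PATH` is set; because the path is resolved at import time, the environment variable has to be set before the module is imported.

```python
import os

# assumption: point the cache somewhere writable *before* importing the module
os.environ["LM_HARNESS_CACHE_PATH"] = "/tmp/lm_harness_cache"

from lm_eval.caching.cache import delete_cache, load_from_cache, save_to_cache

task_dict = {"example_task": {"num_fewshot": 5}}  # stand-in for a real cached object

save_to_cache("example_task", task_dict)   # writes example_task.<sha256-prefix>.pickle
cached = load_from_cache("example_task")   # returns the object, or None and logs a debug message
assert cached == task_dict

delete_cache("example_task")               # removes matching *.pickle files for that key
```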
lm-evaluation/lm_eval/decontamination/__init__.py
ADDED
File without changes
|
lm-evaluation/lm_eval/decontamination/archiver.py
ADDED
@@ -0,0 +1,171 @@
1 |
+
import datetime
|
2 |
+
import io
|
3 |
+
import json
|
4 |
+
import mmap
|
5 |
+
import os
|
6 |
+
from pathlib import Path
|
7 |
+
from typing import Any
|
8 |
+
|
9 |
+
import jsonlines
|
10 |
+
import tqdm
|
11 |
+
import zstandard
|
12 |
+
|
13 |
+
|
14 |
+
def json_serial(obj: Any) -> str:
|
15 |
+
"""JSON serializer for objects not serializable by default json code"""
|
16 |
+
|
17 |
+
if isinstance(obj, (datetime.datetime,)):
|
18 |
+
return obj.isoformat()
|
19 |
+
raise TypeError("Type %s not serializable" % type(obj))
|
20 |
+
|
21 |
+
|
22 |
+
# Modified version of lm_dataformat Archive for single file.
|
23 |
+
class Archive:
|
24 |
+
def __init__(self, file_path: str, compression_level: int = 3) -> None:
|
25 |
+
self.file_path = file_path
|
26 |
+
dir_name = os.path.dirname(file_path)
|
27 |
+
if dir_name:
|
28 |
+
os.makedirs(dir_name, exist_ok=True)
|
29 |
+
self.fh = open(self.file_path, "wb")
|
30 |
+
self.cctx = zstandard.ZstdCompressor(level=compression_level)
|
31 |
+
self.compressor = self.cctx.stream_writer(self.fh)
|
32 |
+
|
33 |
+
def add_data(self, data, meta=None) -> None:
|
34 |
+
if meta is None:
|
35 |
+
meta = {}
|
36 |
+
self.compressor.write(
|
37 |
+
json.dumps({"text": data, "meta": meta}, default=json_serial).encode(
|
38 |
+
"UTF-8"
|
39 |
+
)
|
40 |
+
+ b"\n"
|
41 |
+
)
|
42 |
+
|
43 |
+
def commit(self) -> None:
|
44 |
+
self.compressor.flush(zstandard.FLUSH_FRAME)
|
45 |
+
self.fh.flush()
|
46 |
+
self.fh.close()
|
47 |
+
|
48 |
+
|
49 |
+
# Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm.
|
50 |
+
class Reader:
|
51 |
+
def __init__(self) -> None:
|
52 |
+
pass
|
53 |
+
|
54 |
+
def read(
|
55 |
+
self,
|
56 |
+
file,
|
57 |
+
get_meta: bool = False,
|
58 |
+
autojoin_paragraphs: bool = True,
|
59 |
+
para_joiner: str = "\n\n",
|
60 |
+
):
|
61 |
+
with open(file, "rb") as fh:
|
62 |
+
self.fh = fh
|
63 |
+
cctx = zstandard.ZstdDecompressor()
|
64 |
+
reader = io.BufferedReader(cctx.stream_reader(fh))
|
65 |
+
rdr = jsonlines.Reader(reader)
|
66 |
+
for ob in rdr:
|
67 |
+
# naive jsonl where each object is just the string itself, with no meta. For legacy compatibility.
|
68 |
+
if isinstance(ob, str):
|
69 |
+
assert not get_meta
|
70 |
+
yield ob
|
71 |
+
continue
|
72 |
+
|
73 |
+
text = ob["text"]
|
74 |
+
|
75 |
+
if autojoin_paragraphs and isinstance(text, list):
|
76 |
+
text = para_joiner.join(text)
|
77 |
+
|
78 |
+
if get_meta:
|
79 |
+
yield text, (ob["meta"] if "meta" in ob else {})
|
80 |
+
else:
|
81 |
+
yield text
|
82 |
+
|
83 |
+
|
84 |
+
class TextArchive:
|
85 |
+
def __init__(self, file_path, mode: str = "rb+") -> None:
|
86 |
+
self.file_path = file_path
|
87 |
+
dir_name = os.path.dirname(file_path)
|
88 |
+
if dir_name:
|
89 |
+
os.makedirs(dir_name, exist_ok=True)
|
90 |
+
|
91 |
+
if not os.path.exists(file_path):
|
92 |
+
Path(file_path).touch()
|
93 |
+
|
94 |
+
self.fh = open(self.file_path, mode)
|
95 |
+
|
96 |
+
def add_data(self, data) -> None:
|
97 |
+
self.fh.write(data.encode("UTF-8") + b"\n")
|
98 |
+
|
99 |
+
def commit(self) -> None:
|
100 |
+
self.fh.flush()
|
101 |
+
self.fh.close()
|
102 |
+
|
103 |
+
|
104 |
+
class TextReader:
|
105 |
+
def __init__(self, file_path) -> None:
|
106 |
+
self.file_path = file_path
|
107 |
+
|
108 |
+
# Optimized mmap read with infrequent tqdm updates to maintain speed
|
109 |
+
# Tested up to 250MB/s.
|
110 |
+
def read_tqdm(self, update_frequency: int = 10000):
|
111 |
+
current_file_position = 0
|
112 |
+
line_counter = 0
|
113 |
+
with open(self.file_path, "r", encoding="utf-8") as fh, tqdm.tqdm(
|
114 |
+
total=os.path.getsize(self.file_path),
|
115 |
+
dynamic_ncols=True,
|
116 |
+
unit="byte",
|
117 |
+
unit_scale=1,
|
118 |
+
) as progress:
|
119 |
+
with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj:
|
120 |
+
for line in iter(mmap_obj.readline, b""):
|
121 |
+
line = line.decode("utf-8")
|
122 |
+
line_counter += 1
|
123 |
+
if line_counter == update_frequency:
|
124 |
+
new_file_pos = mmap_obj.tell()
|
125 |
+
bytes_read = new_file_pos - current_file_position
|
126 |
+
current_file_position = new_file_pos
|
127 |
+
progress.update(bytes_read)
|
128 |
+
line_counter = 0
|
129 |
+
yield line[:-1]
|
130 |
+
|
131 |
+
def read_and_tell(self):
|
132 |
+
current_file_position = 0
|
133 |
+
with open(self.file_path, "r", encoding="utf8") as fh:
|
134 |
+
with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj:
|
135 |
+
for line in iter(mmap_obj.readline, b""):
|
136 |
+
line = line.decode("utf-8")
|
137 |
+
new_file_pos = mmap_obj.tell()
|
138 |
+
raw_bytes_read = new_file_pos - current_file_position
|
139 |
+
current_file_position = new_file_pos
|
140 |
+
yield line[:-1], raw_bytes_read
|
141 |
+
|
142 |
+
def read(self):
|
143 |
+
with open(self.file_path, "r", encoding="utf8") as fh:
|
144 |
+
with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj:
|
145 |
+
for line in iter(mmap_obj.readline, b""):
|
146 |
+
line = line.decode("utf-8")
|
147 |
+
yield line[:-1]
|
148 |
+
|
149 |
+
def read_slow(self):
|
150 |
+
with open(self.file_path, "r", encoding="utf8") as fh:
|
151 |
+
while True:
|
152 |
+
line = fh.readline()
|
153 |
+
if line == -1 or line == "":
|
154 |
+
break
|
155 |
+
else:
|
156 |
+
yield line[:-1]
|
157 |
+
|
158 |
+
|
159 |
+
# Optimized for speed. Decompresses the archive in shell before
|
160 |
+
# using the mmap'd TextReader.
|
161 |
+
class ZStdTextReader:
|
162 |
+
def __init__(self, file) -> None:
|
163 |
+
self.file = file
|
164 |
+
|
165 |
+
def read_tqdm(self):
|
166 |
+
decompressed_file = self.file[:-4]
|
167 |
+
print("Decompressing file, please wait...")
|
168 |
+
os.system(f"zstd -d {self.file}") # linux decompress is faster
|
169 |
+
reader = TextReader(decompressed_file)
|
170 |
+
yield from reader.read_tqdm()
|
171 |
+
os.remove(decompressed_file)
|
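A round-trip sketch for the `Archive`/`Reader` pair above (the file path and sample documents are illustrative): `Archive.add_data` appends zstd-compressed JSONL records, and `Reader.read` streams them back, optionally with their metadata.

```python
from lm_eval.decontamination.archiver import Archive, Reader

path = "data/example.jsonl.zst"  # illustrative path

# write two documents, one with metadata
archive = Archive(path)
archive.add_data("first document", meta={"source": "demo"})
archive.add_data("second document")
archive.commit()

# stream them back; get_meta=True yields (text, meta) pairs
reader = Reader()
for text, meta in reader.read(path, get_meta=True):
    print(text, meta)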
lm-evaluation/lm_eval/decontamination/decontaminate.py
ADDED
@@ -0,0 +1,166 @@
1 |
+
import collections
|
2 |
+
import glob
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
import pickle
|
6 |
+
import random
|
7 |
+
import time
|
8 |
+
|
9 |
+
from .archiver import ZStdTextReader
|
10 |
+
from .janitor import Janitor, word_ngrams
|
11 |
+
|
12 |
+
|
13 |
+
# Was used for testing the evaluator decoupled from the full logic below
|
14 |
+
def get_train_overlap_stub(docs: dict, ngrams_path: str, ngrams_n_size: str):
|
15 |
+
simulated_overlap = 0.1
|
16 |
+
contaminated = int(len(docs) * simulated_overlap)
|
17 |
+
return random.sample(range(len(docs)), contaminated)
|
18 |
+
|
19 |
+
|
20 |
+
# Returns a dictionary containing all overlapping documents in each
|
21 |
+
# task. In the standard use case, an overlap occurs when any of the 13-grams
|
22 |
+
# found in the task document exist in the training set documents.
|
23 |
+
#
|
24 |
+
# To generate 13-grams for the pile see scripts/clean_training_data. The final output of these
|
25 |
+
# scripts are an info.json file containing the n_gram_size (13) and a bunch of "ngrams_{x}.bkt.txt.sorted.zst"
|
26 |
+
# files. These should exist in the "ngrams_path" provided to this function.
|
27 |
+
|
28 |
+
|
29 |
+
# Algorithm:
|
30 |
+
# 1. Build lookups for each dataset {ngram: list(document_ids)}
|
31 |
+
# 2. Merge into an overall lookup {ngram: [(task_name, task_set, doc_ids),]}
|
32 |
+
# 3. Full scan the 13-grams from the training set against the merged lookup,
|
33 |
+
# saving matches in the "duplicates" dictionary {(task_name, task_set): set(doc_ids)}
|
34 |
+
# 4. Strip the task_set from the dictionary keys and return
|
35 |
+
#
|
36 |
+
# We cache the task+set lookups as well as the overlaps.
|
37 |
+
def get_train_overlap(docs_by_task_set: dict, ngrams_path: str, limit: int) -> dict:
|
38 |
+
# return get_train_overlap_stub(docs, ngrams_path, ngrams_n_size)
|
39 |
+
|
40 |
+
info_dict_path = os.path.join(ngrams_path, "info.json")
|
41 |
+
info_dict = json.load(open(info_dict_path, "r", encoding="utf-8"))
|
42 |
+
ngrams_n_size = info_dict["ngram_size"]
|
43 |
+
|
44 |
+
janitor = Janitor()
|
45 |
+
|
46 |
+
# Build lookup for each dataset first in case we use different task combinations later
|
47 |
+
print("Building Lookups...")
|
48 |
+
start = time.perf_counter()
|
49 |
+
|
50 |
+
def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit) -> str:
|
51 |
+
return f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.overlaps"
|
52 |
+
|
53 |
+
lookups = {}
|
54 |
+
duplicates = {} # (task_name, task_set): set(doc_ids)}
|
55 |
+
sets_to_decontaminate = len(docs_by_task_set.keys())
|
56 |
+
|
57 |
+
for (task_name, task_set), docs in docs_by_task_set.items():
|
58 |
+
if not os.path.exists(f"data/{task_name}"):
|
59 |
+
os.mkdir(f"data/{task_name}")
|
60 |
+
|
61 |
+
# Check if we've decontaminated this combination before
|
62 |
+
overlaps_dump_path = get_overlaps_dump_path(
|
63 |
+
task_name, task_set, ngrams_n_size, limit
|
64 |
+
)
|
65 |
+
if os.path.exists(overlaps_dump_path):
|
66 |
+
duplicates[(task_name, task_set)] = pickle.load(
|
67 |
+
open(overlaps_dump_path, "rb")
|
68 |
+
)
|
69 |
+
sets_to_decontaminate -= 1
|
70 |
+
continue
|
71 |
+
else:
|
72 |
+
duplicates[(task_name, task_set)] = set()
|
73 |
+
|
74 |
+
# Build/load the task lookup {ngram: set(documents)}.
|
75 |
+
task_set_lookup_path = (
|
76 |
+
f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.lookup"
|
77 |
+
)
|
78 |
+
if os.path.exists(task_set_lookup_path):
|
79 |
+
print(f"{task_set_lookup_path} available, loading...")
|
80 |
+
lookups[(task_name, task_set)] = pickle.load(
|
81 |
+
open(task_set_lookup_path, "rb")
|
82 |
+
)
|
83 |
+
else:
|
84 |
+
print(f"{task_set_lookup_path} not available, building...")
|
85 |
+
lookup = collections.defaultdict(set)
|
86 |
+
|
87 |
+
for doc_id, document in enumerate(docs):
|
88 |
+
ngrams = word_ngrams(janitor.normalize_string(document), ngrams_n_size)
|
89 |
+
for ngram in ngrams:
|
90 |
+
lookup[ngram].add(doc_id)
|
91 |
+
|
92 |
+
pickle.dump(lookup, open(task_set_lookup_path, "wb"))
|
93 |
+
lookups[(task_name, task_set)] = lookup
|
94 |
+
|
95 |
+
elapsed = time.perf_counter() - start
|
96 |
+
print(f"Building lookups took {elapsed:0.5f} seconds.")
|
97 |
+
|
98 |
+
matched_ngrams = []
|
99 |
+
|
100 |
+
if sets_to_decontaminate > 0:
|
101 |
+
print("Merging lookups...")
|
102 |
+
start = time.perf_counter()
|
103 |
+
merged_lookup = collections.defaultdict(list)
|
104 |
+
for (task_name, task_set), lookup in lookups.items():
|
105 |
+
for ngram, doc_ids in lookup.items():
|
106 |
+
merged_lookup[ngram].append((task_name, task_set, doc_ids))
|
107 |
+
|
108 |
+
elapsed = time.perf_counter() - start
|
109 |
+
print(f"Merging lookups took {elapsed:0.5f} seconds.")
|
110 |
+
|
111 |
+
print(f"{ngrams_n_size} grams files found in {ngrams_path}:")
|
112 |
+
files = glob.glob(os.path.join(ngrams_path, "*.sorted.zst"))
|
113 |
+
print(files)
|
114 |
+
|
115 |
+
for file in files:
|
116 |
+
start = time.perf_counter()
|
117 |
+
print(f"Scanning {file}")
|
118 |
+
reader = ZStdTextReader(file)
|
119 |
+
total_ngrams = 0
|
120 |
+
unique_ngrams = 0
|
121 |
+
matching_unique = 0
|
122 |
+
non_matching_unique = 0
|
123 |
+
|
124 |
+
current_ngram = ""
|
125 |
+
for line in reader.read_tqdm(): # Scan training set ngrams file
|
126 |
+
total_ngrams += 1
|
127 |
+
[ngram, document_id] = line.rsplit(" ", 1)
|
128 |
+
if (
|
129 |
+
ngram != current_ngram
|
130 |
+
): # Only need to match the ngram once in training set
|
131 |
+
unique_ngrams += 1
|
132 |
+
current_ngram = ngram
|
133 |
+
if ngram in merged_lookup:
|
134 |
+
matched_ngrams.append(ngram) # For logging
|
135 |
+
matching_unique += 1
|
136 |
+
for task_name, task_set, doc_ids in merged_lookup[ngram]:
|
137 |
+
task_doc_set = duplicates[(task_name, task_set)]
|
138 |
+
for doc_id in doc_ids: # Record contamination across all relevant task/set combos
|
139 |
+
task_doc_set.add(doc_id)
|
140 |
+
del merged_lookup[ngram] # No point matching again
|
141 |
+
else:
|
142 |
+
non_matching_unique += 1
|
143 |
+
|
144 |
+
print(f"Total Ngrams: {total_ngrams}")
|
145 |
+
print(f"Unique Ngrams: {unique_ngrams}")
|
146 |
+
print(f"Unique Matching: {matching_unique}")
|
147 |
+
print(f"Unique Non Matching: {non_matching_unique}")
|
148 |
+
print("Matched ngrams:")
|
149 |
+
for ngram in matched_ngrams:
|
150 |
+
print(ngram)
|
151 |
+
|
152 |
+
elapsed = time.perf_counter() - start
|
153 |
+
print(f"Read took {elapsed:0.5f} seconds.")
|
154 |
+
print(f"Speed: {(os.path.getsize(file)/1000000.0)/elapsed}MB/second")
|
155 |
+
|
156 |
+
print(duplicates)
|
157 |
+
|
158 |
+
# Dump overlaps separately
|
159 |
+
for (task_name, task_set), doc_ids in duplicates.items():
|
160 |
+
overlaps_dump_path = get_overlaps_dump_path(
|
161 |
+
task_name, task_set, ngrams_n_size, limit
|
162 |
+
)
|
163 |
+
pickle.dump(doc_ids, open(overlaps_dump_path, "wb"))
|
164 |
+
|
165 |
+
# Strip task set and return
|
166 |
+
return {task_name: doc_ids for (task_name, task_set), doc_ids in duplicates.items()}
|
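The lookup-building step described in the comments above amounts to indexing each task document by its normalized n-grams. A stripped-down sketch, with a placeholder document list and a small n-gram size (the real pipeline uses 13):

```python
import collections

from lm_eval.decontamination.janitor import Janitor, word_ngrams

docs = ["The quick brown fox jumps over the lazy dog"]  # placeholder task documents
ngrams_n_size = 3                                       # 13 in the real pipeline

janitor = Janitor()
lookup = collections.defaultdict(set)  # {ngram: set(doc_ids)}

for doc_id, document in enumerate(docs):
    for ngram in word_ngrams(janitor.normalize_string(document), ngrams_n_size):
        lookup[ngram].add(doc_id)

# a training-set ngram counts as an overlap if it appears as a key here
print("the quick brown" in lookup)  # True: normalization lowercases and strips punctuation
```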
lm-evaluation/lm_eval/decontamination/janitor.py
ADDED
@@ -0,0 +1,328 @@
1 |
+
import pickle
|
2 |
+
import re
|
3 |
+
import string
|
4 |
+
import traceback
|
5 |
+
from typing import Iterator, List, Sequence, Tuple, TypeVar
|
6 |
+
|
7 |
+
|
8 |
+
# This is a cpp module. Compile janitor_util.cpp with:
|
9 |
+
# c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) janitor_util.cpp -o janitor_util$(python3-config --extension-suffix) -undefined dynamic_lookup
|
10 |
+
try:
|
11 |
+
import janitor_util
|
12 |
+
|
13 |
+
JANITOR_CPP = True
|
14 |
+
except Exception:
|
15 |
+
print("WARNING: C++ module could not be loaded. Janitor running in python mode")
|
16 |
+
traceback.print_exc()
|
17 |
+
JANITOR_CPP = False
|
18 |
+
|
19 |
+
T = TypeVar("T")
|
20 |
+
|
21 |
+
|
22 |
+
# Implementation from nltk source
|
23 |
+
# https://www.nltk.org/_modules/nltk/util.html
|
24 |
+
def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[Tuple[T, ...]]:
|
25 |
+
history = []
|
26 |
+
while n > 1:
|
27 |
+
# PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator
|
28 |
+
try:
|
29 |
+
next_item = next(sequence)
|
30 |
+
except StopIteration:
|
31 |
+
# no more data, terminate the generator
|
32 |
+
return
|
33 |
+
history.append(next_item)
|
34 |
+
n -= 1
|
35 |
+
for item in sequence:
|
36 |
+
history.append(item)
|
37 |
+
yield tuple(history)
|
38 |
+
del history[0]
|
39 |
+
|
40 |
+
|
41 |
+
def word_ngrams(s: str, n: int) -> Iterator[str]:
|
42 |
+
"""Splits a string into ngram words"""
|
43 |
+
tokens = s.split() # not a generator :(
|
44 |
+
ngram_seqs = form_ngrams(iter(tokens), n)
|
45 |
+
return (" ".join(ngram) for ngram in ngram_seqs)
|
46 |
+
|
47 |
+
|
48 |
+
# Does character sequences only - combined faster function to play around with later
|
49 |
+
# def word_ngrams_indices_combined(sequence, n):
|
50 |
+
# current_word = ""
|
51 |
+
# history = []
|
52 |
+
# gap = False;
|
53 |
+
# start = 0
|
54 |
+
# end = 0
|
55 |
+
# for character in sequence:
|
56 |
+
# if character == " ":
|
57 |
+
# if not gap:
|
58 |
+
# gap = True
|
59 |
+
# history.append(current_word)
|
60 |
+
# end += len(current_word) - 1
|
61 |
+
# current_word = ""
|
62 |
+
# if len(history) == n:
|
63 |
+
# yield (tuple(history), start, end)
|
64 |
+
# del history[0]
|
65 |
+
# start = end + 1
|
66 |
+
# end = start
|
67 |
+
# else:
|
68 |
+
# gap = False
|
69 |
+
# current_word += character
|
70 |
+
|
71 |
+
|
72 |
+
# https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python
|
73 |
+
def split_indices(s: str) -> Iterator[Tuple[str, Tuple[int, int]]]:
|
74 |
+
"""Splits a string on whitespaces and records the indices of each in the original string.
|
75 |
+
@:return generator((word, (start_idx, end_idx)), ...)
|
76 |
+
"""
|
77 |
+
return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r"\S+", s))
|
78 |
+
|
79 |
+
|
80 |
+
def word_ngrams_indices(s: str, n: int) -> Iterator[Tuple[str, Tuple[int, int]]]:
|
81 |
+
"""Splits a string into pairs of (ngram words, their start/end indices)"""
|
82 |
+
tokens_with_indices = split_indices(s)
|
83 |
+
|
84 |
+
# Generator of ngrams of (word, idx_pairs)
|
85 |
+
# (
|
86 |
+
# [(word, (start,end)), (word, (start, end))...],
|
87 |
+
# [(word, (start, end)), ...],
|
88 |
+
# ...
|
89 |
+
# )
|
90 |
+
ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n)
|
91 |
+
|
92 |
+
# Generator of pairs of word and index ngrams
|
93 |
+
# (
|
94 |
+
# ([word, word, ...], [(start,end), (start,end), ...]),
|
95 |
+
# ...
|
96 |
+
# )
|
97 |
+
ngram_indices_pairs = (
|
98 |
+
zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices
|
99 |
+
)
|
100 |
+
|
101 |
+
# Generator of ( (word_ngram, (start, end)), (word_ngram, start, end)), ...)
|
102 |
+
return (
|
103 |
+
(" ".join(ngram_seq), (indices[0][0], indices[-1][1]))
|
104 |
+
for ngram_seq, indices in ngram_indices_pairs
|
105 |
+
)
|
106 |
+
|
107 |
+
|
108 |
+
class Janitor:
|
109 |
+
# FIXME delete_chars: Should anything else go here? Special chars?
|
110 |
+
def __init__(
|
111 |
+
self,
|
112 |
+
ngram_n: int = 13,
|
113 |
+
window_to_remove: int = 200,
|
114 |
+
too_dirty_cutoff: int = 10,
|
115 |
+
minimum_slice_length: int = 200,
|
116 |
+
delete_chars: str = string.punctuation,
|
117 |
+
) -> None:
|
118 |
+
self.ngram_n = ngram_n
|
119 |
+
self.window_to_remove = window_to_remove
|
120 |
+
self.too_dirty_cutoff = too_dirty_cutoff
|
121 |
+
self.minimum_slice_length = minimum_slice_length
|
122 |
+
self.delete_chars = delete_chars
|
123 |
+
|
124 |
+
self.dirt_ngrams = set()
|
125 |
+
|
126 |
+
# If in python, we'll translate uppercase to lowercase and delete naughty characters.
|
127 |
+
# This is fast by python standards
|
128 |
+
# https://stackoverflow.com/questions/638893/what-is-the-most-efficient-way-in-python-to-convert-a-string-to-all-lowercase-st
|
129 |
+
self.translation_table = str.maketrans(
|
130 |
+
string.ascii_lowercase + string.ascii_uppercase, # These characters
|
131 |
+
string.ascii_lowercase * 2, # Become these characters
|
132 |
+
self.delete_chars, # These are deleted
|
133 |
+
)
|
134 |
+
|
135 |
+
##############
|
136 |
+
# I/O for saving contamination ngrams
|
137 |
+
##############
|
138 |
+
|
139 |
+
def save_contamination_ngrams(self, filename: str) -> None:
|
140 |
+
with open(filename, "wb") as fp:
|
141 |
+
pickle.dump(filename, fp)
|
142 |
+
|
143 |
+
def load_contamination_ngrams(self, filename: str) -> None:
|
144 |
+
with open(filename, "rb") as fp:
|
145 |
+
self.dirt_ngrams = pickle.load(fp)
|
146 |
+
|
147 |
+
##############
|
148 |
+
# Call these :)
|
149 |
+
##############
|
150 |
+
|
151 |
+
def register_contaminant(self, dirt_string: str) -> None:
|
152 |
+
"""Register a string as contamination to be removed, e.g. a test set
|
153 |
+
This breaks the dirt_string into ngrams to store for future cleaning"""
|
154 |
+
if JANITOR_CPP:
|
155 |
+
return self.register_contaminant_cpp(dirt_string)
|
156 |
+
else:
|
157 |
+
print("WARNING: Janitor running in python mode")
|
158 |
+
return self.register_contaminant_python(dirt_string)
|
159 |
+
|
160 |
+
def clean(self, dirty_string: str) -> List[str]:
|
161 |
+
"""Clean a string (e.g. a training set) by removing all ngrams previously
|
162 |
+
registered as contaminants. Returns a list of clean chunks, or empty if
|
163 |
+
the string was too dirty"""
|
164 |
+
if JANITOR_CPP:
|
165 |
+
return self.clean_cpp(dirty_string)
|
166 |
+
else:
|
167 |
+
print("WARNING: Janitor running in python mode")
|
168 |
+
return self.clean_python(dirty_string)
|
169 |
+
|
170 |
+
def _split_chunks(
|
171 |
+
self, dirty_string: str, dirty_parts: Sequence[Tuple]
|
172 |
+
) -> List[str]:
|
173 |
+
clean_chunks = []
|
174 |
+
splice_idx = 0
|
175 |
+
end = -1
|
176 |
+
for i, (ngram, start, end) in enumerate(dirty_parts):
|
177 |
+
if i >= self.too_dirty_cutoff:
|
178 |
+
return []
|
179 |
+
start = max(0, start - self.window_to_remove)
|
180 |
+
end = min(len(dirty_string), end + self.window_to_remove)
|
181 |
+
|
182 |
+
if start - splice_idx > self.minimum_slice_length:
|
183 |
+
clean_chunks.append(dirty_string[splice_idx:start])
|
184 |
+
splice_idx = end
|
185 |
+
|
186 |
+
if end < len(dirty_string) - self.minimum_slice_length:
|
187 |
+
clean_chunks.append(dirty_string[end + 1 :])
|
188 |
+
|
189 |
+
return clean_chunks
|
190 |
+
|
191 |
+
##############
|
192 |
+
# Fast C++
|
193 |
+
##############
|
194 |
+
|
195 |
+
def register_contaminant_cpp(self, dirt_string) -> None:
|
196 |
+
self.dirt_ngrams.update(
|
197 |
+
janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n)
|
198 |
+
)
|
199 |
+
|
200 |
+
def clean_cpp(self, dirty_string: str) -> List[str]:
|
201 |
+
contamination_indices = janitor_util.clean_ngram_with_indices(
|
202 |
+
dirty_string, self.delete_chars, self.ngram_n
|
203 |
+
)
|
204 |
+
return self._split_chunks(dirty_string, contamination_indices)
|
205 |
+
|
206 |
+
##############
|
207 |
+
# Slow python
|
208 |
+
##############
|
209 |
+
|
210 |
+
def normalize_string(self, s: str) -> str:
|
211 |
+
return s.translate(self.translation_table)
|
212 |
+
|
213 |
+
def register_contaminant_python(self, dirt_string: str) -> None:
|
214 |
+
self.dirt_ngrams.update(
|
215 |
+
word_ngrams(self.normalize_string(dirt_string), self.ngram_n)
|
216 |
+
)
|
217 |
+
|
218 |
+
def clean_python(self, dirty_string: str) -> List[str]:
|
219 |
+
contamination_indices = (
|
220 |
+
(None, *idx_pair)
|
221 |
+
for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n)
|
222 |
+
if self.normalize_string(dirty_ngram) in self.dirt_ngrams
|
223 |
+
)
|
224 |
+
return self._split_chunks(dirty_string, contamination_indices)
|
225 |
+
|
226 |
+
|
227 |
+
##################################################################
|
228 |
+
# Tests
|
229 |
+
#################################################################
|
230 |
+
|
231 |
+
# def print_cpp():
|
232 |
+
# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
|
233 |
+
|
234 |
+
# for i in range(1, 10, 2):
|
235 |
+
# pprint(janitor_util.clean_ngram(source, string.punctuation, i))
|
236 |
+
# for ngram, start, end in \
|
237 |
+
# janitor_util.clean_ngram_with_indices(source, string.punctuation, i):
|
238 |
+
# print(ngram, "\t", start, end, source[start:end].replace("\n", "\\n"))
|
239 |
+
|
240 |
+
|
241 |
+
# def test_cpp():
|
242 |
+
# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
|
243 |
+
# contaminant = "dirty boy. Clean he he"
|
244 |
+
|
245 |
+
# jan_python = Janitor()
|
246 |
+
# jan_cpp = Janitor()
|
247 |
+
|
248 |
+
# jan_python.register_contaminant_python(contaminant)
|
249 |
+
# jan_cpp.register_contaminant(contaminant)
|
250 |
+
|
251 |
+
# assert jan_python.dirt_ngrams == jan_cpp.dirt_ngrams, (jan_python.dirt_ngrams, jan_cpp.dirt_ngrams)
|
252 |
+
|
253 |
+
# assert jan_python.clean_python(source) == jan_cpp.clean(source), \
|
254 |
+
# (jan_python.clean_python(source), jan_cpp.clean(source))
|
255 |
+
|
256 |
+
# print("Passed test, python==cpp")
|
257 |
+
|
258 |
+
|
259 |
+
# def benchmark():
|
260 |
+
# # Download and put in data folder: enwik8 (100 MB) from https://cs.fit.edu/~mmahoney/compression/textdata.html
|
261 |
+
# setup = \
|
262 |
+
# """
|
263 |
+
# with open("data/enwik8", "r") as f:
|
264 |
+
# data = f.read()
|
265 |
+
# jan = Janitor(too_dirty_cutoff=1000)
|
266 |
+
# jan.register_contaminant('''
|
267 |
+
# theories is that there is a connection between "geekdom" and autism.
|
268 |
+
# This is hinted, for instance, by a ''Wired Magazine'' article in 2001 entitled "
|
269 |
+
# The [[Geek]] Syndrome", which is a point argued by many in the autism rights
|
270 |
+
# movement{{ref|Wired}}. This article, many professionals assert, is just one example of
|
271 |
+
# the media's application of mental disease labels to what is actually variant normal behavior
|
272 |
+
# &mdash;they argue that shyness, lack of athletic ability or social skills, and intellectual
|
273 |
+
# interests, even when they seem unusual to others, are not in themselves signs of autism or
|
274 |
+
# Asperger's syndrome. Others assert that it is actually the medical profession which is applying
|
275 |
+
# mental disease labels to children who in the past would have simply been accepted as a little
|
276 |
+
# different or even labeled 'gifted'. See [[clinomorphism]] for further discussion of this issue.
|
277 |
+
# Due to the recent publicity surrounding autism and autis
|
278 |
+
# ultan Al Nahyan]] granted [[Petroleum]] concessions, and oil was first found in 1958. At first,
|
279 |
+
# oil money had a marginal impact. A few lowrise concete buildings were erected, and the first
|
280 |
+
# paved road was completed in 1961, but Sheikh Shakbut, uncertain whether the new oil royalties
|
281 |
+
# would last, took a cautious approach, preferring to save the revenue rather than investing it in
|
282 |
+
# development. His brother, [[Zayed bin Sultan Al Nahayan]], saw that oil wealth had the potential
|
283 |
+
# to transform Abu Dhabi. The ruling Al Nahayan family decided that Sheikh Zayed should replace his
|
284 |
+
# brother as Ruler and carry out his vision of developing the country. On [[August 6]], [[1966]],
|
285 |
+
# with the assistance of the British, Sheikh Zayed became the new ruler. See generally, Al-Fahim, M,
|
286 |
+
# ''From Rags to Riches: A Story of Abu Dhabi'', Chapter Six (London Centre of Arab Studies, 1995),
|
287 |
+
# ISBN 1 900404 00 1. With the announcement by Britain in 1968 that it would withdraw from the
|
288 |
+
# Gulf area by 1971, Sheikh Zayed became the main driving force behind the formation of the
|
289 |
+
# [[United Arab Emirates]]. After the Emirates gained independence in 1971,
|
290 |
+
# ''')
|
291 |
+
# """
|
292 |
+
|
293 |
+
# n = 1
|
294 |
+
# print(f"Timing {n} run on 100 MB")
|
295 |
+
# print("Register contaminant")
|
296 |
+
# # print("\tPython", timeit.timeit("jan.register_contaminant_python(data)", setup=setup, globals=globals(), number=n))
|
297 |
+
# print("\tCpp", timeit.timeit("jan.register_contaminant(data)", setup=setup, globals=globals(), number=n))
|
298 |
+
|
299 |
+
# print("Clean")
|
300 |
+
# # print("\tPython", timeit.timeit("jan.clean_python(data)", setup=setup, globals=globals(), number=n))
|
301 |
+
# print("\tCpp", timeit.timeit("jan.clean(data)", setup=setup, globals=globals(), number=n))
|
302 |
+
|
303 |
+
|
304 |
+
# def test_janitor_general():
|
305 |
+
# source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
|
306 |
+
# contaminant = "dirty boy. Clean he he"
|
307 |
+
|
308 |
+
# jan = Janitor(ngram_n=3)
|
309 |
+
# jan.register_contaminant(contaminant)
|
310 |
+
# cleaned = " ".join(jan.clean(source))
|
311 |
+
# for contam in jan.dirt_ngrams:
|
312 |
+
# assert contam not in cleaned, contam
|
313 |
+
|
314 |
+
# filename = "data/saved_contam"
|
315 |
+
# jan.save_contamination_ngrams(filename)
|
316 |
+
|
317 |
+
# jan = Janitor(ngram_n=3)
|
318 |
+
# jan.load_contamination_ngrams(filename)
|
319 |
+
# cleaned = " ".join(jan.clean(source))
|
320 |
+
# for contam in jan.dirt_ngrams:
|
321 |
+
# assert contam not in cleaned, contam
|
322 |
+
|
323 |
+
|
324 |
+
# if __name__ == "__main__":
|
325 |
+
# test()
|
326 |
+
# # print_cpp()
|
327 |
+
# # test_cpp()
|
328 |
+
# # benchmark()
|
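A small end-to-end sketch of the Janitor in pure-Python mode, in the spirit of the commented-out tests above. The strings are made up, and `ngram_n` is lowered so the toy contaminant actually registers as a single n-gram; the surviving chunk count depends on the window parameters, so it is only printed, not asserted.

```python
from lm_eval.decontamination.janitor import Janitor

contaminant = "the benchmark answer is forty two exactly"   # pretend test-set text
training_doc = ("lots of clean text " * 30
                + "the benchmark answer is forty two exactly "
                + "more clean text " * 30)

jan = Janitor(ngram_n=7, window_to_remove=20, minimum_slice_length=50)
jan.register_contaminant_python(contaminant)   # store the normalized 7-grams
chunks = jan.clean_python(training_doc)        # drop a window around every hit

print(len(chunks))                             # the clean slices that survive
```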
lm-evaluation/lm_eval/filters/__init__.py
ADDED
@@ -0,0 +1,48 @@
from functools import partial
from typing import List, Union

from lm_eval.api.filter import FilterEnsemble

from . import extraction, selection, transformation


FILTER_REGISTRY = {
    "take_first": selection.TakeFirstFilter,
    "regex": extraction.RegexFilter,
    "majority_vote": selection.MajorityVoteFilter,
    "take_first_k": selection.TakeKFilter,
    "remove_whitespace": extraction.WhitespaceFilter,
    "lowercase": transformation.LowercaseFilter,
    "uppercase": transformation.UppercaseFilter,
    "map": transformation.MapFilter,
    "multi_choice_regex": extraction.MultiChoiceRegexFilter,
    # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function
    # that takes an input and returns a scalar and then should select the max reward,
    # or should implement different filters for different ways of handling a reward model's inference.
    # "arg_max": selection.ArgMaxFilter,
}


def get_filter(filter_name: str) -> Union[type, str]:
    if filter_name in FILTER_REGISTRY:
        return FILTER_REGISTRY[filter_name]
    else:
        return filter_name


def build_filter_ensemble(
    filter_name: str, components: List[List[str]]
) -> FilterEnsemble:
    """
    Create a filtering pipeline.
    """
    filters = []
    for function, kwargs in components:
        if kwargs is None:
            kwargs = {}
        # create a filter given its name in the registry
        f = partial(get_filter(function), **kwargs)
        # add the filter as a pipeline step
        filters.append(f)

    return FilterEnsemble(name=filter_name, filters=filters)
|
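A sketch of wiring the registry above into a pipeline. The component list mirrors the `[name, kwargs]` pairs a task config would expand to; the ensemble name and regex are illustrative, and the resulting `FilterEnsemble` is what the harness later applies to each task's responses.

```python
from lm_eval.filters import build_filter_ensemble

# two pipeline steps: pull out "#### <number>" answers, then keep only the first response
ensemble = build_filter_ensemble(
    "score-first",
    [
        ["regex", {"regex_pattern": r"#### (\-?[0-9\.\,]+)"}],
        ["take_first", None],
    ],
)
```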
lm-evaluation/lm_eval/filters/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.27 kB). View file
|
|
lm-evaluation/lm_eval/filters/__pycache__/extraction.cpython-310.pyc
ADDED
Binary file (5.89 kB). View file
|
|
lm-evaluation/lm_eval/filters/__pycache__/selection.cpython-310.pyc
ADDED
Binary file (2.76 kB). View file
|
|
lm-evaluation/lm_eval/filters/__pycache__/transformation.cpython-310.pyc
ADDED
Binary file (3.32 kB). View file
|
|
lm-evaluation/lm_eval/filters/decontamination.py
ADDED
@@ -0,0 +1,24 @@
from lm_eval.api.filter import Filter


class DecontaminationFilter(Filter):

    """
    A filter which evaluates
    """

    name = "track_decontamination"

    def __init__(self, path) -> None:
        """

        TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path").
        should further cache result on a given (task_name, doc_id)
        """
        self._decontam_results = None

    def apply(self, resps, docs) -> None:
        """
        Return {"no_contamination", "only_contamination"} keys for the 2 different subsets
        """
        pass
|
lm-evaluation/lm_eval/filters/extraction.py
ADDED
@@ -0,0 +1,183 @@
1 |
+
import re
|
2 |
+
import sys
|
3 |
+
import unicodedata
|
4 |
+
|
5 |
+
from lm_eval.api.filter import Filter
|
6 |
+
|
7 |
+
|
8 |
+
class RegexFilter(Filter):
|
9 |
+
""" """
|
10 |
+
|
11 |
+
def __init__(
|
12 |
+
self,
|
13 |
+
regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
|
14 |
+
group_select=0,
|
15 |
+
fallback: str = "[invalid]",
|
16 |
+
) -> None:
|
17 |
+
"""
|
18 |
+
pass a string `regex` to run `re.compile(r"regex")` on.
|
19 |
+
`fallback` defines the output returned if no matches for the regex are located.
|
20 |
+
"""
|
21 |
+
self.regex_pattern = regex_pattern
|
22 |
+
self.regex = re.compile(regex_pattern)
|
23 |
+
self.group_select = group_select
|
24 |
+
self.fallback = fallback
|
25 |
+
|
26 |
+
def apply(self, resps, docs):
|
27 |
+
# here, we assume we have a list, in which each element is
|
28 |
+
# a list of model responses for some particular input/target pair.
|
29 |
+
# so we process each of these (same input/target response sets)
|
30 |
+
# independently (and keep them a list.)
|
31 |
+
def filter_set(inst):
|
32 |
+
filtered = []
|
33 |
+
for resp in inst:
|
34 |
+
match = self.regex.findall(resp)
|
35 |
+
if match:
|
36 |
+
match = match[self.group_select]
|
37 |
+
if isinstance(match, tuple):
|
38 |
+
match = [m for m in match if m][0]
|
39 |
+
match = match.strip()
|
40 |
+
else:
|
41 |
+
match = self.fallback
|
42 |
+
filtered.append(match)
|
43 |
+
return filtered
|
44 |
+
|
45 |
+
# print(resps)
|
46 |
+
filtered_resps = list(map(lambda x: filter_set(x), resps))
|
47 |
+
# print(filtered_resps)
|
48 |
+
|
49 |
+
return filtered_resps
|
50 |
+
|
51 |
+
|
52 |
+
class WhitespaceFilter(Filter):
|
53 |
+
""" """
|
54 |
+
|
55 |
+
def __init__(self) -> None:
|
56 |
+
pass
|
57 |
+
|
58 |
+
def apply(self, resps, docs):
|
59 |
+
def filter_set(inst):
|
60 |
+
filtered_resp = []
|
61 |
+
for resp in inst:
|
62 |
+
if resp.startswith(" "):
|
63 |
+
resp = resp[1:]
|
64 |
+
|
65 |
+
filtered_resp.append(resp)
|
66 |
+
|
67 |
+
return filtered_resp
|
68 |
+
|
69 |
+
filtered_resps = [filter_set(resp) for resp in resps]
|
70 |
+
|
71 |
+
return filtered_resps
|
72 |
+
|
73 |
+
|
74 |
+
class MultiChoiceRegexFilter(RegexFilter):
|
75 |
+
"""
|
76 |
+
A filter used to extract a model's answer on multiple choice questions with
|
77 |
+
letter answers. assumes each document has a "choices" field
|
78 |
+
containing the list of answer choices and that the answer label symbols
|
79 |
+
are of the form (A), (B), (C), ... or A, B, C.
|
80 |
+
"""
|
81 |
+
|
82 |
+
def __init__(
|
83 |
+
self,
|
84 |
+
regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
|
85 |
+
group_select=0,
|
86 |
+
fallback: str = "[invalid]",
|
87 |
+
ignore_case=False,
|
88 |
+
ignore_punctuation=False,
|
89 |
+
regexes_to_ignore=None,
|
90 |
+
) -> None:
|
91 |
+
"""
|
92 |
+
regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
|
93 |
+
- step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
|
94 |
+
- step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
|
95 |
+
group_select: Selects the (group_select)th match from the findall result.
|
96 |
+
ignore_case: Ignores the case during step 1 matching
|
97 |
+
ignore_punctuation: Remove the punctuation during step 1 matching
|
98 |
+
regexes_to_ignore: Remove these regexes during step 1 matching
|
99 |
+
"""
|
100 |
+
super().__init__(regex_pattern, group_select, fallback)
|
101 |
+
self.ignore_case = ignore_case
|
102 |
+
self.ignore_punctuation = ignore_punctuation
|
103 |
+
self.regexes_to_ignore = regexes_to_ignore
|
104 |
+
|
105 |
+
def apply(self, resps, docs):
|
106 |
+
# here, we assume we have a list, in which each element is
|
107 |
+
# a list of model responses for some particular input/target pair.
|
108 |
+
# so we process each of these (same input/target response sets)
|
109 |
+
# independently (and keep them a list.)
|
110 |
+
|
111 |
+
def find_match(regex, resp, convert_dict={}):
|
112 |
+
match = regex.findall(resp)
|
113 |
+
if match:
|
114 |
+
match = match[self.group_select]
|
115 |
+
if isinstance(match, tuple):
|
116 |
+
match = [m for m in match if m][0]
|
117 |
+
match = match.strip()
|
118 |
+
if match and match in convert_dict:
|
119 |
+
match = convert_dict[match]
|
120 |
+
return match
|
121 |
+
|
122 |
+
punct_tbl = dict.fromkeys(
|
123 |
+
i
|
124 |
+
for i in range(sys.maxunicode)
|
125 |
+
if unicodedata.category(chr(i)).startswith("P")
|
126 |
+
)
|
127 |
+
|
128 |
+
def filter_ignores(st):
|
129 |
+
if self.regexes_to_ignore is not None:
|
130 |
+
for s in self.regexes_to_ignore:
|
131 |
+
st = re.sub(s, "", st)
|
132 |
+
|
133 |
+
if self.ignore_case:
|
134 |
+
st = st.lower()
|
135 |
+
|
136 |
+
if self.ignore_punctuation:
|
137 |
+
# https://stackoverflow.com/a/266162
|
138 |
+
st = st.translate(punct_tbl)
|
139 |
+
return st
|
140 |
+
|
141 |
+
filtered_resps = []
|
142 |
+
|
143 |
+
for r, doc in zip(resps, docs):
|
144 |
+
fallback_regexes = []
|
145 |
+
choice_to_alpha = {}
|
146 |
+
next_alpha = "A"
|
147 |
+
|
148 |
+
without_paren_fallback_regexes = []
|
149 |
+
without_paren_to_target = {}
|
150 |
+
|
151 |
+
choices = doc["choices"]
|
152 |
+
for c in choices:
|
153 |
+
m = filter_ignores(c.strip())
|
154 |
+
fallback_regexes.append(f"{re.escape(m)}")
|
155 |
+
choice_to_alpha[m] = f"({next_alpha})"
|
156 |
+
|
157 |
+
without_paren_fallback_regexes.append(next_alpha)
|
158 |
+
without_paren_to_target[next_alpha] = f"({next_alpha})"
|
159 |
+
|
160 |
+
next_alpha = chr(ord(next_alpha) + 1)
|
161 |
+
fallback_regex = re.compile("|".join(fallback_regexes))
|
162 |
+
without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
|
163 |
+
without_paren_fallback_regex = re.compile(
|
164 |
+
f":[\s]*({without_paren_fallback_regex})"
|
165 |
+
)
|
166 |
+
|
167 |
+
filtered = []
|
168 |
+
for resp in r:
|
169 |
+
match = find_match(self.regex, resp)
|
170 |
+
if not match:
|
171 |
+
match = find_match(
|
172 |
+
fallback_regex, filter_ignores(resp), choice_to_alpha
|
173 |
+
)
|
174 |
+
if not match:
|
175 |
+
match = find_match(
|
176 |
+
without_paren_fallback_regex, resp, without_paren_to_target
|
177 |
+
)
|
178 |
+
if not match:
|
179 |
+
match = self.fallback
|
180 |
+
filtered.append(match)
|
181 |
+
filtered_resps.append(filtered)
|
182 |
+
|
183 |
+
return filtered_resps
|
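How the `RegexFilter` above behaves on a batch of responses, as a quick illustrative check (the response strings are made up; `resps` is a list of per-document response lists, and unmatched responses fall back to the configured sentinel):

```python
from lm_eval.filters.extraction import RegexFilter

resps = [
    ["The total is #### 42", "I think #### 17"],
    ["no answer given here"],
]

f = RegexFilter(regex_pattern=r"#### (\-?[0-9\.\,]+)", fallback="[invalid]")
print(f.apply(resps, docs=None))
# -> [['42', '17'], ['[invalid]']]
```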
lm-evaluation/lm_eval/filters/selection.py
ADDED
@@ -0,0 +1,52 @@
1 |
+
from collections import Counter
|
2 |
+
|
3 |
+
from lm_eval.api.filter import Filter
|
4 |
+
|
5 |
+
|
6 |
+
class TakeFirstFilter(Filter):
|
7 |
+
def __init__(self) -> None:
|
8 |
+
"""
|
9 |
+
Can define custom behavior here, if an individual instantiation of a Filter class should have state.
|
10 |
+
"""
|
11 |
+
|
12 |
+
def apply(self, resps, docs):
|
13 |
+
"""
|
14 |
+
Assuming each entry of `resps` is a list of model responses, we discard all but the first response.
|
15 |
+
"""
|
16 |
+
return map(lambda r: r[0], resps)
|
17 |
+
|
18 |
+
|
19 |
+
class TakeKFilter(Filter):
|
20 |
+
def __init__(self, **kwargs) -> None:
|
21 |
+
self.k = kwargs.pop("k")
|
22 |
+
|
23 |
+
super().__init__(**kwargs)
|
24 |
+
|
25 |
+
def apply(self, resps, docs):
|
26 |
+
# need resp to be subscriptable to check below
|
27 |
+
resps = list(resps)
|
28 |
+
# check we have at least k responses per doc, else we can't take the first k
|
29 |
+
assert (
|
30 |
+
len(resps[0]) >= self.k
|
31 |
+
), f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats ."
|
32 |
+
return map(lambda r: r[: self.k], resps)
|
33 |
+
|
34 |
+
|
35 |
+
class MajorityVoteFilter(Filter):
|
36 |
+
def __init__(self) -> None:
|
37 |
+
"""
|
38 |
+
Can define custom behavior here, if an individual instantiation of a Filter class should have state.
|
39 |
+
"""
|
40 |
+
|
41 |
+
def apply(self, resps, docs):
|
42 |
+
"""
|
43 |
+
Each entry of `resps` is a list of model responses.
|
44 |
+
We select the response that occurs most frequently in each entry of `resps`.
|
45 |
+
"""
|
46 |
+
|
47 |
+
def select_majority(resp):
|
48 |
+
counts = Counter(resp)
|
49 |
+
vote = counts.most_common(1)[0][0]
|
50 |
+
return vote
|
51 |
+
|
52 |
+
return map(lambda r: [select_majority(r)], resps)
|
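A quick illustrative pass through the selection filters above (inputs are made up). Note the differing shapes: `TakeFirstFilter` yields one response per document, while `MajorityVoteFilter` and `TakeKFilter` keep lists.

```python
from lm_eval.filters.selection import MajorityVoteFilter, TakeFirstFilter, TakeKFilter

resps = [["A", "B", "A"], ["C", "C", "D"]]  # three sampled answers per document

print(list(TakeFirstFilter().apply(resps, docs=None)))     # ['A', 'C']
print(list(MajorityVoteFilter().apply(resps, docs=None)))  # [['A'], ['C']]
print(list(TakeKFilter(k=2).apply(resps, docs=None)))      # [['A', 'B'], ['C', 'C']]
```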
lm-evaluation/lm_eval/filters/transformation.py
ADDED
@@ -0,0 +1,52 @@
1 |
+
from lm_eval.api.filter import Filter
|
2 |
+
|
3 |
+
|
4 |
+
class LowercaseFilter(Filter):
|
5 |
+
def __init__(self) -> None:
|
6 |
+
pass
|
7 |
+
|
8 |
+
def apply(self, resps, docs):
|
9 |
+
def filter_set(inst):
|
10 |
+
return [resp.lower() for resp in inst]
|
11 |
+
|
12 |
+
return [filter_set(resp) for resp in resps]
|
13 |
+
|
14 |
+
|
15 |
+
class UppercaseFilter(Filter):
|
16 |
+
def __init__(self) -> None:
|
17 |
+
pass
|
18 |
+
|
19 |
+
def apply(self, resps, docs):
|
20 |
+
def filter_set(inst):
|
21 |
+
return [resp.upper() for resp in inst]
|
22 |
+
|
23 |
+
return [filter_set(resp) for resp in resps]
|
24 |
+
|
25 |
+
|
26 |
+
class MapFilter(Filter):
|
27 |
+
def __init__(self, mapping_dict: dict = None, default_value=None) -> None:
|
28 |
+
"""
|
29 |
+
Initializes the MapFilter with a given mapping dictionary and default value.
|
30 |
+
|
31 |
+
Args:
|
32 |
+
- mapping_dict (dict): A dictionary containing the key-value mappings.
|
33 |
+
Default is an empty dictionary.
|
34 |
+
- default_value (Any): The value to be returned when a key is not found in the mapping_dict.
|
35 |
+
Default is None.
|
36 |
+
|
37 |
+
Example:
|
38 |
+
mapper = MapFilter({'A': 1, 'B': 2}, default_value=0)
|
39 |
+
"""
|
40 |
+
if mapping_dict is None:
|
41 |
+
mapping_dict = {}
|
42 |
+
assert isinstance(
|
43 |
+
mapping_dict, dict
|
44 |
+
), "Provided mapping_dict is not a dictionary"
|
45 |
+
self.mapping_dict = mapping_dict
|
46 |
+
self.default_value = default_value
|
47 |
+
|
48 |
+
def apply(self, resps, docs):
|
49 |
+
def filter_set(inst):
|
50 |
+
return [self.mapping_dict.get(resp, self.default_value) for resp in inst]
|
51 |
+
|
52 |
+
return [filter_set(resp) for resp in resps]
|
lm-evaluation/lm_eval/models/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (585 Bytes). View file
|
|
lm-evaluation/lm_eval/tasks/kormedmcqa/README.md
ADDED
@@ -0,0 +1,47 @@
# KorMedMCQA

### Paper

Title: `KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations`

Abstract: `We introduce KorMedMCQA, the first Korean multiple-choice question answering (MCQA) benchmark derived from Korean healthcare professional licensing examinations, covering from the year 2012 to year 2023. This dataset consists of a selection of questions from the license examinations for doctors, nurses, and pharmacists, featuring a diverse array of subjects. We conduct baseline experiments on various large language models, including proprietary/open-source, multilingual/Korean-additional pretrained, and clinical context pretrained models, highlighting the potential for further enhancements. We make our data publicly available on HuggingFace and provide a evaluation script via LM-Harness, inviting further exploration and advancement in Korean healthcare environments.`

Paper: https://arxiv.org/abs/2403.01469

Homepage: https://huggingface.co/datasets/sean0042/KorMedMCQA

### Citation

```
@article{kweon2024kormedmcqa,
  title={KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations},
  author={Sunjun Kweon and Byungjin Choi and Minkyu Kim and Rae Woong Park and Edward Choi},
  journal={arXiv preprint arXiv:2403.01469},
  year={2024}
}
```

### Groups and Tasks

* `kormedmcqa`: Runs `kormedmcqa_doctor`, `kormedmcqa_nurse`, and `kormedmcqa_pharm`.

#### Tasks

* `kormedmcqa_doctor`: `Official Korean Doctor Examination`
* `kormedmcqa_nurse`: `Official Korean Nurse Examination`
* `kormedmcqa_pharm`: `Official Korean Pharmacist Examination`

### Checklist

For adding novel benchmarks/datasets to the library:
* [x] Is the task an existing benchmark in the literature?
  * [x] Have you referenced the original paper that introduced the task?
  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
|
lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml
ADDED
@@ -0,0 +1,27 @@
group: kormedmcqa
task : kormedmcqa_doctor
dataset_path : sean0042/KorMedMCQA
dataset_name : doctor
test_split : test
fewshot_split : dev
fewshot_config:
  sampler: first_n
output_type: generate_until
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
    regexes_to_ignore:
      - " "
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
    - "</s>"
    - "."
  do_sample: false
  temperature: 0.0
|
lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml
ADDED
@@ -0,0 +1,27 @@
group: kormedmcqa
task : kormedmcqa_nurse
dataset_path : sean0042/KorMedMCQA
dataset_name : nurse
test_split : test
fewshot_split : dev
fewshot_config:
  sampler: first_n
output_type: generate_until
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
    regexes_to_ignore:
      - " "
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
    - "</s>"
    - "."
  do_sample: false
  temperature: 0.0
|
lm-evaluation/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml
ADDED
@@ -0,0 +1,27 @@
group: kormedmcqa
task : kormedmcqa_pharm
dataset_path : sean0042/KorMedMCQA
dataset_name : pharm
test_split : test
fewshot_split : dev
fewshot_config:
  sampler: first_n
output_type: generate_until
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
    regexes_to_ignore:
      - " "
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
    - "</s>"
    - "."
  do_sample: false
  temperature: 0.0
|
lm-evaluation/lm_eval/tasks/pile/README.md
ADDED
@@ -0,0 +1,68 @@
# The Pile

### Paper
Title: The Pile: An 800GB Dataset of Diverse Text for Language Modeling

Abstract: https://arxiv.org/abs/2101.00027

The Pile is an 825 GiB diverse, open-source language modelling dataset that consists
of 22 smaller, high-quality datasets combined together. To score well on Pile
BPB (bits per byte), a model must be able to understand many disparate domains,
including books, GitHub repositories, webpages, chat logs, and medical, physics,
math, computer science, and philosophy papers.

Homepage: https://pile.eleuther.ai/

### Citation
```
@article{pile,
    title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
    author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
    journal={arXiv preprint arXiv:2101.00027},
    year={2020}
}
```

### Groups and Tasks

#### Groups

* `pile`

#### Tasks

* `pile_arxiv`
* `pile_bookcorpus2`
* `pile_books3`
* `pile_dm-mathematics`
* `pile_enron`
* `pile_europarl`
* `pile_freelaw`
* `pile_github`
* `pile_gutenberg`
* `pile_hackernews`
* `pile_nih-exporter`
* `pile_opensubtitles`
* `pile_openwebtext2`
* `pile_philpapers`
* `pile_pile-cc`
* `pile_pubmed-abstracts`
* `pile_pubmed-central`
* `pile_stackexchange`
* `pile_ubuntu-irc`
* `pile_uspto`
* `pile_wikipedia`
* `pile_youtubesubtitles`

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?

If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
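
As a companion to the BPB framing in the README above, the sketch below shows the conventional bits-per-byte calculation from a summed log-likelihood and a UTF-8 byte count. It illustrates the quantity being reported; it is not a copy of the harness's own implementation, although the ingredients are the same.

```python
# Conventional bits-per-byte definition, for illustration only.
import math

def bits_per_byte(total_loglikelihood: float, texts: list[str]) -> float:
    """total_loglikelihood is the natural-log likelihood summed over all documents."""
    total_bytes = sum(len(t.encode("utf-8")) for t in texts)
    return -total_loglikelihood / (total_bytes * math.log(2))

# Example: 1,000 bytes scored at -600 nats gives 600 / (1000 * ln 2) ≈ 0.87 BPB.
print(bits_per_byte(-600.0, ["x" * 1000]))
```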
lm-evaluation/lm_eval/tasks/pile/pile_arxiv.yaml
ADDED
@@ -0,0 +1,23 @@
group:
  - pile
task: pile_arxiv
dataset_path: EleutherAI/pile
dataset_name: pile_arxiv
output_type: loglikelihood_rolling
test_split: train
doc_to_text: ""
doc_to_target: "{{text}}"
should_decontaminate: true
doc_to_decontamination_query: "{{text}}"
metric_list:
  - metric: word_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: byte_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: bits_per_byte
    aggregation: bits_per_byte
    higher_is_better: false
metadata:
  version: 2.0
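
The `weighted_perplexity` aggregation used for `word_perplexity` and `byte_perplexity` normalizes by the total number of units across the whole split rather than averaging per-document perplexities. A small sketch of that aggregation follows, under the assumption that the unit counts are whitespace-split words and UTF-8 bytes respectively; the harness's own aggregation may differ in detail.

```python
# Sketch of a corpus-level weighted perplexity: sum the per-document
# log-likelihoods, normalize by the total unit count, then exponentiate.
import math

def weighted_perplexity(pairs: list[tuple[float, int]]) -> float:
    """pairs = [(loglikelihood_in_nats, unit_count), ...] per document."""
    total_ll = sum(ll for ll, _ in pairs)
    total_units = sum(n for _, n in pairs)
    return math.exp(-total_ll / total_units)

# word_perplexity would use len(text.split()) as the unit count,
# byte_perplexity would use len(text.encode("utf-8")).
print(weighted_perplexity([(-120.0, 40), (-300.0, 110)]))
```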
lm-evaluation/lm_eval/tasks/pile/pile_bookcorpus2.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_bookcorpus2
dataset_name: pile_bookcorpus2
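
This and the following subset configs are three lines each: they pull everything from `pile_arxiv.yaml` via `include` and override only `task` and `dataset_name`. Below is a hedged sketch of the merge semantics such configs rely on, where the child's keys win over the included base; the harness's real loader is more involved (it resolves include paths itself, among other things), so treat this as an approximation.

```python
# Approximate resolution of an `include:`-based task config: load the base
# YAML, then let the child's keys override it.
import yaml
from pathlib import Path

def load_task_config(path: str) -> dict:
    cfg = yaml.safe_load(Path(path).read_text())
    base_name = cfg.pop("include", None)
    if base_name:
        base = load_task_config(str(Path(path).parent / base_name))
        base.update(cfg)   # child keys (task, dataset_name) win
        cfg = base
    return cfg

# load_task_config("pile_bookcorpus2.yaml") yields the pile_arxiv settings
# with task/dataset_name replaced by pile_bookcorpus2.
```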
lm-evaluation/lm_eval/tasks/pile/pile_books3.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_books3
dataset_name: pile_books3
lm-evaluation/lm_eval/tasks/pile/pile_dm-mathematics.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_dm-mathematics
dataset_name: pile_dm-mathematics
lm-evaluation/lm_eval/tasks/pile/pile_freelaw.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_freelaw
dataset_name: pile_freelaw
lm-evaluation/lm_eval/tasks/pile/pile_github.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_github
dataset_name: pile_github
lm-evaluation/lm_eval/tasks/pile/pile_gutenberg.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_gutenberg
dataset_name: pile_gutenberg
lm-evaluation/lm_eval/tasks/pile/pile_hackernews.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_hackernews
dataset_name: pile_hackernews
lm-evaluation/lm_eval/tasks/pile/pile_nih-exporter.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_nih-exporter
dataset_name: pile_nih-exporter
lm-evaluation/lm_eval/tasks/pile/pile_opensubtitles.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_opensubtitles
dataset_name: pile_opensubtitles
lm-evaluation/lm_eval/tasks/pile/pile_openwebtext2.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_openwebtext2
dataset_name: pile_openwebtext2
lm-evaluation/lm_eval/tasks/pile/pile_philpapers.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_philpapers
dataset_name: pile_philpapers
lm-evaluation/lm_eval/tasks/pile/pile_pile-cc.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_pile-cc
dataset_name: pile_pile-cc
lm-evaluation/lm_eval/tasks/pile/pile_pubmed-abstracts.yaml
ADDED
@@ -0,0 +1,3 @@
include: pile_arxiv.yaml
task: pile_pubmed-abstracts
dataset_name: pile_pubmed-abstracts