applied-ai-018 committed
Commit cd0431b · verified · 1 Parent(s): c48163f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. lm-evaluation-harness/lm_eval/api/__init__.py +0 -0
  3. lm-evaluation-harness/lm_eval/api/__pycache__/__init__.cpython-310.pyc +0 -0
  4. lm-evaluation-harness/lm_eval/api/__pycache__/filter.cpython-310.pyc +0 -0
  5. lm-evaluation-harness/lm_eval/api/__pycache__/instance.cpython-310.pyc +0 -0
  6. lm-evaluation-harness/lm_eval/api/__pycache__/metrics.cpython-310.pyc +0 -0
  7. lm-evaluation-harness/lm_eval/api/__pycache__/model.cpython-310.pyc +0 -0
  8. lm-evaluation-harness/lm_eval/api/__pycache__/registry.cpython-310.pyc +0 -0
  9. lm-evaluation-harness/lm_eval/api/__pycache__/samplers.cpython-310.pyc +0 -0
  10. lm-evaluation-harness/lm_eval/api/__pycache__/task.cpython-310.pyc +0 -0
  11. lm-evaluation-harness/lm_eval/api/filter.py +56 -0
  12. lm-evaluation-harness/lm_eval/api/instance.py +38 -0
  13. lm-evaluation-harness/lm_eval/api/metrics.py +509 -0
  14. lm-evaluation-harness/lm_eval/api/model.py +346 -0
  15. lm-evaluation-harness/lm_eval/api/registry.py +172 -0
  16. lm-evaluation-harness/lm_eval/api/samplers.py +114 -0
  17. lm-evaluation-harness/lm_eval/api/task.py +1498 -0
  18. lm-evaluation-harness/lm_eval/caching/__pycache__/cache.cpython-310.pyc +0 -0
  19. lm-evaluation-harness/lm_eval/caching/cache.py +55 -0
  20. lm-evaluation-harness/lm_eval/filters/__init__.py +48 -0
  21. lm-evaluation-harness/lm_eval/filters/__pycache__/__init__.cpython-310.pyc +0 -0
  22. lm-evaluation-harness/lm_eval/filters/__pycache__/extraction.cpython-310.pyc +0 -0
  23. lm-evaluation-harness/lm_eval/filters/__pycache__/selection.cpython-310.pyc +0 -0
  24. lm-evaluation-harness/lm_eval/filters/__pycache__/transformation.cpython-310.pyc +0 -0
  25. lm-evaluation-harness/lm_eval/filters/decontamination.py +24 -0
  26. lm-evaluation-harness/lm_eval/filters/extraction.py +183 -0
  27. lm-evaluation-harness/lm_eval/filters/selection.py +52 -0
  28. lm-evaluation-harness/lm_eval/filters/transformation.py +52 -0
  29. lm-evaluation-harness/lm_eval/models/__init__.py +27 -0
  30. lm-evaluation-harness/lm_eval/models/__pycache__/__init__.cpython-310.pyc +0 -0
  31. lm-evaluation-harness/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc +0 -0
  32. lm-evaluation-harness/lm_eval/models/__pycache__/dummy.cpython-310.pyc +0 -0
  33. lm-evaluation-harness/lm_eval/models/__pycache__/gguf.cpython-310.pyc +0 -0
  34. lm-evaluation-harness/lm_eval/models/__pycache__/huggingface.cpython-310.pyc +0 -0
  35. lm-evaluation-harness/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc +0 -0
  36. lm-evaluation-harness/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc +0 -0
  37. lm-evaluation-harness/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc +0 -0
  38. lm-evaluation-harness/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc +0 -0
  39. lm-evaluation-harness/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc +0 -0
  40. lm-evaluation-harness/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc +0 -0
  41. lm-evaluation-harness/lm_eval/models/__pycache__/textsynth.cpython-310.pyc +0 -0
  42. lm-evaluation-harness/lm_eval/models/__pycache__/utils.cpython-310.pyc +0 -0
  43. lm-evaluation-harness/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc +0 -0
  44. lm-evaluation-harness/lm_eval/models/anthropic_llms.py +360 -0
  45. lm-evaluation-harness/lm_eval/models/dummy.py +41 -0
  46. lm-evaluation-harness/lm_eval/models/gguf.py +130 -0
  47. lm-evaluation-harness/lm_eval/models/huggingface.py +1274 -0
  48. lm-evaluation-harness/lm_eval/models/mamba_lm.py +126 -0
  49. lm-evaluation-harness/lm_eval/models/nemo_lm.py +537 -0
  50. lm-evaluation-harness/lm_eval/models/neuralmagic.py +426 -0
.gitattributes CHANGED
@@ -128,3 +128,4 @@ lm-evaluation-harness/wandb/run-20240605_140919-mkdnls2x/run-mkdnls2x.wandb filt
128
  lm-evaluation-harness/wandb/run-20240605_093020-laxetjfu/run-laxetjfu.wandb filter=lfs diff=lfs merge=lfs -text
129
  lm-evaluation-harness/wandb/run-20240606_045127-vb760voe/run-vb760voe.wandb filter=lfs diff=lfs merge=lfs -text
130
  venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
131
+ venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text
lm-evaluation-harness/lm_eval/api/__init__.py ADDED
File without changes
lm-evaluation-harness/lm_eval/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (155 Bytes).
lm-evaluation-harness/lm_eval/api/__pycache__/filter.cpython-310.pyc ADDED
Binary file (2.71 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/instance.cpython-310.pyc ADDED
Binary file (1.51 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (12.3 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/model.cpython-310.pyc ADDED
Binary file (12.1 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/registry.cpython-310.pyc ADDED
Binary file (4.56 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/samplers.cpython-310.pyc ADDED
Binary file (3.49 kB).
lm-evaluation-harness/lm_eval/api/__pycache__/task.cpython-310.pyc ADDED
Binary file (39.4 kB).
lm-evaluation-harness/lm_eval/api/filter.py ADDED
@@ -0,0 +1,56 @@
1
+ from abc import ABC, abstractmethod
2
+ from dataclasses import dataclass
3
+ from typing import Callable, Iterable, List, Union
4
+
5
+ from lm_eval.api.instance import Instance
6
+
7
+
8
+ class Filter(ABC):
9
+ """
10
+ Filter classes operate on a per-task level.
11
+ They take all model outputs (`instance.resps` for all `task.instances`)
12
+ across all instances of a task, and perform operations.
13
+ In a single run, one can configure any number of separate filters or lists of filters.
14
+
15
+ """
16
+
17
+ def __init__(self, **kwargs) -> None:
18
+ """
19
+ Can define custom behavior here, if an individual instantiation of a Filter class should have state.
20
+ """
21
+
22
+ @abstractmethod
23
+ def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable:
24
+ """
25
+ Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects.
26
+ Should return the list of (filtered) response lists *in the same order as they were input*, e.g.
27
+ if pass in [<inst.resps for instance 0>, <inst.resps for instance 1>] should return
28
+ [<filtered resps for instance 0>, <filtered resps for instance 1>]
29
+ """
30
+ return resps
31
+
32
+
33
+ @dataclass
34
+ class FilterEnsemble:
35
+ """
36
+ FilterEnsemble creates a pipeline applying multiple filters.
37
+ Its intended usage is to stack multiple post-processing steps in order.
38
+ `task.apply_filters` should use a list of FilterEnsemble classes that it stores, to apply each
39
+ pipeline separately.
40
+ """
41
+
42
+ name: str
43
+ filters: List[Callable[[], Filter]]
44
+
45
+ def apply(self, instances: List[Instance]) -> None:
46
+ resps, docs = zip(*((inst.resps, inst.doc) for inst in instances))
47
+ resps, docs = list(resps), list(docs)
48
+
49
+ for f in self.filters:
50
+ # apply filters in sequence
51
+ resps = f().apply(resps, docs)
52
+
53
+ # add the end results after filtering to filtered_requests of their respective source instances.
54
+ # has key `self.name`: each FilterEnsemble applied in a given run should use a different name.
55
+ for inst, resp in zip(instances, resps):
56
+ inst.filtered_resps[self.name] = resp
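
For illustration, a minimal usage sketch of the Filter / FilterEnsemble API added above (not part of this commit; it assumes lm_eval is installed, and StripFilter plus the sample doc are hypothetical):

from typing import Iterable, List, Union

from lm_eval.api.filter import Filter, FilterEnsemble
from lm_eval.api.instance import Instance


class StripFilter(Filter):
    """Strip leading/trailing whitespace from every model response."""

    def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable:
        # resps is a list of per-instance response lists; order must be preserved
        return [[r.strip() for r in instance_resps] for instance_resps in resps]


# each entry in `filters` is a zero-argument callable returning a Filter
ensemble = FilterEnsemble(name="strip_whitespace", filters=[StripFilter])

inst = Instance(
    request_type="generate_until",
    doc={"question": "2+2?"},
    arguments=("2+2?", {"until": ["\n"]}),
    idx=0,
)
inst.resps = ["  4 \n"]
ensemble.apply([inst])
print(inst.filtered_resps["strip_whitespace"])  # ['4']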
lm-evaluation-harness/lm_eval/api/instance.py ADDED
@@ -0,0 +1,38 @@
1
+ from dataclasses import dataclass, field
2
+ from typing import Literal, Optional, Tuple
3
+
4
+
5
+ OutputType = Literal[
6
+ "loglikelihood", "loglikelihood_rolling", "generate_until", "multiple_choice"
7
+ ]
8
+
9
+
10
+ @dataclass
11
+ class Instance:
12
+ request_type: OutputType
13
+ doc: dict
14
+ arguments: tuple
15
+ idx: int
16
+ metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field(
17
+ default_factory=lambda: (None, None, None)
18
+ )
19
+ resps: list = field(default_factory=list)
20
+ filtered_resps: dict = field(default_factory=dict)
21
+
22
+ # initialized after init
23
+ task_name: Optional[str] = None
24
+ doc_id: Optional[int] = None
25
+ repeats: Optional[int] = None
26
+
27
+ def __post_init__(self) -> None:
28
+ # unpack metadata field
29
+ self.task_name, self.doc_id, self.repeats = self.metadata
30
+
31
+ @property
32
+ def args(self):
33
+ """
34
+ Returns (string,) where `string` is the string to calculate loglikelihood over
35
+ """
36
+ return (
37
+ self.arguments if isinstance(self.arguments, tuple) else (self.arguments,)
38
+ )
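
A short usage sketch of the Instance dataclass above (illustration only, not part of this commit; the task name and values are invented):

from lm_eval.api.instance import Instance

inst = Instance(
    request_type="loglikelihood",
    doc={"text": "The capital of France is"},
    arguments=("The capital of France is", " Paris"),
    idx=0,
    metadata=("my_task", 7, 1),  # unpacked by __post_init__ into task_name, doc_id, repeats
)
assert (inst.task_name, inst.doc_id, inst.repeats) == ("my_task", 7, 1)

# a bare (non-tuple) argument is wrapped into a 1-tuple by the `args` property
rolling = Instance(
    request_type="loglikelihood_rolling",
    doc={},
    arguments="some long document text",
    idx=0,
)
assert rolling.args == ("some long document text",)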
lm-evaluation-harness/lm_eval/api/metrics.py ADDED
@@ -0,0 +1,509 @@
1
+ import logging
2
+ import math
3
+ import random
4
+ from collections.abc import Iterable
5
+ from typing import List
6
+
7
+ import evaluate as hf_evaluate
8
+ import numpy as np
9
+ import sacrebleu
10
+ import sklearn.metrics
11
+
12
+ from lm_eval.api.registry import register_aggregation, register_metric
13
+
14
+
15
+ eval_logger = logging.getLogger("lm-eval")
16
+
17
+
18
+ # Register Aggregations First
19
+ @register_aggregation("bypass")
20
+ def bypass_agg(arr):
21
+ return 999
22
+
23
+
24
+ @register_aggregation("mean")
25
+ def mean(arr):
26
+ return sum(arr) / len(arr)
27
+
28
+
29
+ @register_aggregation("median")
30
+ def median(arr):
31
+ return arr[len(arr) // 2]
32
+
33
+
34
+ # Certain metrics must be calculated across all documents in a benchmark.
35
+ # We use them as aggregation metrics, paired with no-op passthrough metric fns.
36
+ @register_aggregation("perplexity")
37
+ def perplexity(items):
38
+ return math.exp(-mean(items))
39
+
40
+
41
+ @register_aggregation("weighted_perplexity")
42
+ def weighted_perplexity(items):
43
+ return math.exp(-weighted_mean(items))
44
+
45
+
46
+ @register_aggregation("bits_per_byte")
47
+ def bits_per_byte(items):
48
+ return -weighted_mean(items) / math.log(2)
49
+
50
+
51
+ @register_aggregation("f1")
52
+ def f1_score(items):
53
+ unzipped_list = list(zip(*items))
54
+ golds = unzipped_list[0]
55
+ preds = unzipped_list[1]
56
+ fscore = sklearn.metrics.f1_score(golds, preds)
57
+
58
+ return np.max(fscore)
59
+
60
+
61
+ @register_aggregation("matthews_corrcoef")
62
+ def matthews_corrcoef(items):
63
+ unzipped_list = list(zip(*items))
64
+ golds = unzipped_list[0]
65
+ preds = unzipped_list[1]
66
+ # print(preds)
67
+ return sklearn.metrics.matthews_corrcoef(golds, preds)
68
+
69
+
70
+ @register_aggregation("bleu")
71
+ def bleu(items):
72
+ """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
73
+ for evaluating a generated sentence to a reference sentence. It counts matching
74
+ n-grams in the candidate translation to n-grams in the reference text, where
75
+ 1-gram or unigram would be each token and a bigram comparison would be each
76
+ word pair. The comparison is made regardless of word order
77
+ Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
78
+ Paper: https://www.aclweb.org/anthology/P02-1040/
79
+
80
+ Higher is better
81
+ """
82
+ refs = list(zip(*items))[0]
83
+ preds = list(zip(*items))[1]
84
+ refs, preds = _sacreformat(refs, preds)
85
+ return sacrebleu.corpus_bleu(preds, refs).score
86
+
87
+
88
+ @register_aggregation("chrf")
89
+ def chrf(items):
90
+ """chrF++ is a tool for automatic evaluation of machine translation output
91
+ based on character n-gram precision and recall enhanced with word n-grams.
92
+ Source: https://github.com/m-popovic/chrF
93
+ Paper: https://www.aclweb.org/anthology/W15-3049.pdf
94
+
95
+ Higher is better # TODO I think
96
+ """
97
+ refs = list(zip(*items))[0]
98
+ preds = list(zip(*items))[1]
99
+ refs, preds = _sacreformat(refs, preds)
100
+ return sacrebleu.corpus_chrf(preds, refs).score
101
+
102
+
103
+ @register_aggregation("ter")
104
+ def ter(items):
105
+ """Translation Error Rate is an error metric for machine translation that
106
+ measures the number of edits required to change a system output into one
107
+ of the references
108
+ Source: http://www.cs.umd.edu/~snover/tercom/
109
+ Paper: http://mt-archive.info/AMTA-2006-Snover.pdf
110
+
111
+ Lower is better
112
+ """
113
+ refs = list(zip(*items))[0]
114
+ preds = list(zip(*items))[1]
115
+ refs, preds = _sacreformat(refs, preds)
116
+ return sacrebleu.corpus_ter(preds, refs).score
117
+
118
+
119
+ @register_aggregation("brier_score")
120
+ def brier_score(items): # This is a passthrough function
121
+ gold, predictions = list(zip(*items))
122
+ gold = list(gold)
123
+ gold_one_hot = np.eye(np.max(gold) + 1)[gold]
124
+ predictions = list(zip(*items))[1]
125
+ return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1))
126
+
127
+
128
+ @register_metric(
129
+ metric="brier_score",
130
+ higher_is_better=False,
131
+ output_type=["multiple_choice"],
132
+ aggregation="brier_score",
133
+ )
134
+ def brier_score_fn(items): # This is a passthrough function
135
+ return items
136
+
137
+
138
+ @register_metric(
139
+ metric="acc",
140
+ higher_is_better=True,
141
+ output_type=["loglikelihood", "multiple_choice"],
142
+ aggregation="mean",
143
+ )
144
+ def acc_fn(items): # This is a passthrough function
145
+ return items
146
+
147
+
148
+ @register_metric(
149
+ metric="acc_norm",
150
+ higher_is_better=True,
151
+ output_type=["loglikelihood", "multiple_choice"],
152
+ aggregation="mean",
153
+ )
154
+ def acc_norm_fn(items): # This is a passthrough function
155
+ return items
156
+
157
+
158
+ @register_metric(
159
+ metric="acc_mutual_info",
160
+ higher_is_better=True,
161
+ output_type="multiple_choice",
162
+ aggregation="mean",
163
+ )
164
+ def acc_mutual_info_fn(items): # This is a passthrough function
165
+ return items
166
+
167
+
168
+ exact_match = hf_evaluate.load("exact_match")
169
+
170
+
171
+ @register_metric(
172
+ metric="exact_match",
173
+ higher_is_better=True,
174
+ output_type="generate_until",
175
+ aggregation="mean",
176
+ )
177
+ def exact_match_fn(**kwargs):
178
+ return exact_match.compute(**kwargs)
179
+
180
+
181
+ @register_metric(
182
+ metric="perplexity",
183
+ higher_is_better=False,
184
+ output_type="loglikelihood",
185
+ aggregation="perplexity",
186
+ )
187
+ def perplexity_fn(items): # This is a passthrough function
188
+ return items
189
+
190
+
191
+ @register_metric(
192
+ metric="word_perplexity",
193
+ higher_is_better=False,
194
+ output_type="loglikelihood_rolling",
195
+ aggregation="weighted_perplexity",
196
+ )
197
+ def word_perplexity_fn(items): # This is a passthrough function
198
+ return items
199
+
200
+
201
+ @register_metric(
202
+ metric="byte_perplexity",
203
+ higher_is_better=False,
204
+ output_type="loglikelihood_rolling",
205
+ aggregation="weighted_perplexity",
206
+ )
207
+ def byte_perplexity_fn(items): # This is a passthrough function
208
+ return items
209
+
210
+
211
+ @register_metric(
212
+ metric="bits_per_byte",
213
+ higher_is_better=False,
214
+ output_type="loglikelihood_rolling",
215
+ aggregation="bits_per_byte",
216
+ )
217
+ def bits_per_byte_fn(items): # This is a passthrough function
218
+ return items
219
+
220
+
221
+ def pop_stddev(arr):
222
+ mu = mean(arr)
223
+ return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))
224
+
225
+
226
+ def sample_stddev(arr):
227
+ mu = mean(arr)
228
+ return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))
229
+
230
+
231
+ def mean_stderr(arr):
232
+ return sample_stddev(arr) / math.sqrt(len(arr))
233
+
234
+
235
+ @register_metric(
236
+ metric="bypass",
237
+ higher_is_better=True,
238
+ output_type=["loglikelihood", "multiple_choice", "generate_until"],
239
+ aggregation="bypass",
240
+ )
241
+ def bypass(items):
242
+ return None
243
+
244
+
245
+ @register_metric(
246
+ metric="mcc",
247
+ higher_is_better=True,
248
+ output_type="multiple_choice",
249
+ aggregation="matthews_corrcoef",
250
+ )
251
+ def mcc_fn(items): # This is a passthrough function
252
+ return items
253
+
254
+
255
+ @register_metric(
256
+ metric="f1",
257
+ higher_is_better=True,
258
+ output_type="multiple_choice",
259
+ aggregation="f1",
260
+ )
261
+ def f1_fn(items): # This is a passthrough function
262
+ return items
263
+
264
+
265
+ @register_metric(
266
+ metric="bleu",
267
+ higher_is_better=True,
268
+ output_type="generate_until",
269
+ aggregation="bleu",
270
+ )
271
+ def bleu_fn(items): # This is a passthrough function
272
+ return items
273
+
274
+
275
+ @register_metric(
276
+ metric="chrf",
277
+ higher_is_better=True,
278
+ output_type="generate_until",
279
+ aggregation="chrf",
280
+ )
281
+ def chrf_fn(items): # This is a passthrough function
282
+ return items
283
+
284
+
285
+ @register_metric(
286
+ metric="ter",
287
+ higher_is_better=True,
288
+ output_type="generate_until",
289
+ aggregation="ter",
290
+ )
291
+ def ter_fn(items): # This is a passthrough function
292
+ return items
293
+
294
+
295
+ @register_metric(
296
+ metric="acc_all",
297
+ higher_is_better=True,
298
+ output_type="loglikelihood",
299
+ aggregation="mean",
300
+ )
301
+ def acc_all(items):
302
+ # Only count as correct if all answers are labeled correctly for each question
303
+ question_scoring_dict = {}
304
+ preds = list(zip(*items))[0]
305
+ docs = list(zip(*items))[1]
306
+
307
+ for doc, pred in zip(docs, preds):
308
+ paragraph_id = doc["idx"]["paragraph"]
309
+ question_id = doc["idx"]["question"]
310
+ if (paragraph_id, question_id) not in question_scoring_dict:
311
+ question_scoring_dict[(paragraph_id, question_id)] = []
312
+
313
+ gold_label = doc["label"] == 1
314
+
315
+ question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
316
+ acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
317
+ return acc
318
+
319
+
320
+ def acc_all_stderr(items):
321
+ # Only count as correct if all answers are labeled correctly for each question
322
+ question_scoring_dict = {}
323
+ preds = list(zip(*items))[0]
324
+ docs = list(zip(*items))[1]
325
+
326
+ for doc, pred in zip(docs, preds):
327
+ question_id = doc["idx"]["question"]
328
+ if question_id not in question_scoring_dict:
329
+ question_scoring_dict[question_id] = []
330
+
331
+ gold_label = doc["label"] == 1
332
+ question_scoring_dict[question_id].append(gold_label == pred)
333
+
334
+ acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
335
+ return acc
336
+
337
+
338
+ def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
339
+ """Compute max metric between prediction and each ground truth."""
340
+ scores_for_ground_truths = []
341
+ for ground_truth in ground_truths:
342
+ score = metric_fn(prediction, ground_truth)
343
+ scores_for_ground_truths.append(score)
344
+ return max(scores_for_ground_truths)
345
+
346
+
347
+ def weighted_mean(items):
348
+ a, b = zip(*items)
349
+ return sum(a) / sum(b)
350
+
351
+
352
+ def is_non_str_iterable(obj):
353
+ return isinstance(obj, Iterable) and not isinstance(obj, str)
354
+
355
+
356
+ def _sacreformat(refs, preds):
357
+ """Format refs and preds for sacrebleu corpus calculation. It is very particular"""
358
+ # Sacrebleu expects (List[str], List[List[str])
359
+ # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])
360
+
361
+ # Note [ref1_stream] is the first reference for each pred.
362
+ # So lists are size N and (M, N) for N preds and M possible refs for each pred
363
+ # This is a different order of dimensions that I would expect
364
+
365
+ # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
366
+ # Must become List[List[str]] with the inner list corresponding to preds
367
+ if not is_non_str_iterable(refs):
368
+ refs = list(refs)
369
+ if not is_non_str_iterable(refs[0]):
370
+ refs = [[ref] for ref in refs]
371
+ refs = list(zip(*refs))
372
+ # Note the number of refs in each ref list much match the number of preds
373
+
374
+ # We expect preds to be List[str] or List[List[str]]. Must become List[str]
375
+ if not is_non_str_iterable(preds):
376
+ preds = list(preds)
377
+ if is_non_str_iterable(preds[0]):
378
+ assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
379
+ preds = [pred[0] for pred in preds]
380
+
381
+ return refs, preds
382
+
383
+
384
+ # stderr stuff
385
+
386
+
387
+ class _bootstrap_internal:
388
+ def __init__(self, f, n) -> None:
389
+ self.f = f
390
+ self.n = n
391
+
392
+ def __call__(self, v):
393
+ i, xs = v
394
+ rnd = random.Random()
395
+ rnd.seed(i)
396
+ res = []
397
+ for _ in range(self.n):
398
+ res.append(self.f(rnd.choices(xs, k=len(xs))))
399
+ return res
400
+
401
+
402
+ def bootstrap_stderr(f, xs, iters):
403
+ import multiprocessing as mp
404
+
405
+ pool = mp.Pool(mp.cpu_count())
406
+ # this gives a biased estimate of the stderr (i.e w/ the mean, it gives something
407
+ # equivalent to stderr calculated without Bessel's correction in the stddev.
408
+ # Unfortunately, I haven't been able to figure out what the right correction is
409
+ # to make the bootstrap unbiased - i considered multiplying by sqrt(n/(n-1)) but
410
+ # that would be ad-hoc and I can't prove that that would actually be an unbiased estimator)
411
+ # Thankfully, shouldn't matter because our samples are pretty big usually anyways
412
+ res = []
413
+ chunk_size = min(1000, iters)
414
+ from tqdm import tqdm
415
+
416
+ print("bootstrapping for stddev:", f.__name__)
417
+ for bootstrap in tqdm(
418
+ pool.imap(
419
+ _bootstrap_internal(f, chunk_size),
420
+ [(i, xs) for i in range(iters // chunk_size)],
421
+ ),
422
+ total=iters // chunk_size,
423
+ ):
424
+ # sample w replacement
425
+ res.extend(bootstrap)
426
+
427
+ pool.close()
428
+ return sample_stddev(res)
429
+
430
+
431
+ def stderr_for_metric(metric, bootstrap_iters):
432
+ bootstrappable = [
433
+ median,
434
+ matthews_corrcoef,
435
+ f1_score,
436
+ perplexity,
437
+ bleu,
438
+ chrf,
439
+ ter,
440
+ ]
441
+
442
+ if metric in bootstrappable:
443
+ return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)
444
+
445
+ stderr = {mean: mean_stderr, acc_all: acc_all_stderr}
446
+
447
+ return stderr.get(metric, None)
448
+
449
+
450
+ def pooled_sample_stderr(stderrs: List[float], sizes: List[int]):
451
+ # Used to aggregate bootstrapped stderrs across subtasks in a group,
452
+ # when we are weighting by the size of each subtask.
453
+ #
454
+
455
+ assert len(stderrs) == len(sizes)
456
+
457
+ # formula source: https://en.wikipedia.org/wiki/Pooled_variance
458
+ # and: https://stats.stackexchange.com/a/4841331
459
+ # this empirically seems to match running `stderr_for_metric` on all instances
460
+ # from the subtasks concatenated with each other.
461
+ pooled_sample_var = (
462
+ sum([(size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)])
463
+ ) / (sum(sizes) - len(sizes))
464
+
465
+ return np.sqrt(pooled_sample_var / sum(sizes))
466
+
467
+
468
+ def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None):
469
+ assert (
470
+ metrics is not None
471
+ ), "Need to pass a list of each subtask's metric for this stderr aggregation"
472
+ assert len(stderrs) == len(sizes) and len(sizes) == len(metrics)
473
+
474
+ # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1390 for more documentation.
475
+ # This formula depends on sample means.
476
+ # removed because it seems to give erroneously huge stderrs for groupings of tasks
477
+ # and does not seem to match up with bootstrap-calculated stderrs for groups.
478
+
479
+ ### don't use this unless a statistician has told you it's the right thing to do ###
480
+
481
+ # accumulators: we'll aggregate pairwise N - 1 times
482
+ variance = stderrs[0] ** 2
483
+ curr_size = sizes[0]
484
+ curr_score = metrics[0]
485
+
486
+ for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]):
487
+ curr_score = ((curr_score * curr_size) + (score * size)) / (
488
+ curr_size + size
489
+ ) # NOTE: this assumes our aggregation fn is "mean"
490
+
491
+ variance = ((curr_size - 1) * variance + (size - 1) * (stderr**2)) / (
492
+ curr_size + size - 1
493
+ ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * (
494
+ curr_score - score
495
+ ) ** 2
496
+
497
+ return np.sqrt(variance)
498
+
499
+
500
+ def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True):
501
+ # A helper function that is used to aggregate
502
+ # subtask scores cross-task.
503
+ # TODO: does not hold for non-mean aggregations
504
+ if not weight_by_size:
505
+ sizes = [1] * len(sizes)
506
+
507
+ assert len(metrics) == len(sizes)
508
+
509
+ return sum([metric * size for metric, size in zip(metrics, sizes)]) / sum(sizes)
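
To make the aggregation helpers above concrete, a small numeric sketch (illustration only, not part of this commit; it assumes lm_eval and its metric dependencies are installed, and all numbers are invented):

import math

from lm_eval.api.metrics import (
    aggregate_subtask_metrics,
    mean,
    pooled_sample_stderr,
    weighted_perplexity,
)

# word_perplexity-style items: (sum of loglikelihoods, word count) per document
items = [(-35.2, 12), (-18.9, 7)]
ppl = weighted_perplexity(items)  # exp(-(-54.1) / 19) ~= 17.2
assert math.isclose(ppl, math.exp(54.1 / 19))

# size-weighted aggregation of per-subtask accuracies, as used for task groups
accs, sizes = [0.60, 0.80], [100, 300]
assert math.isclose(aggregate_subtask_metrics(accs, sizes), 0.75)
print(mean(accs))  # unweighted mean, ~= 0.7

# pooled standard error across the same subtasks
print(pooled_sample_stderr([0.049, 0.023], sizes))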
lm-evaluation-harness/lm_eval/api/model.py ADDED
@@ -0,0 +1,346 @@
1
+ import abc
2
+ import hashlib
3
+ import json
4
+ import logging
5
+ import os
6
+ from typing import List, Optional, Tuple, Type, TypeVar
7
+
8
+ import transformers
9
+ from sqlitedict import SqliteDict
10
+ from tqdm import tqdm
11
+
12
+ from lm_eval import utils
13
+
14
+
15
+ eval_logger = logging.getLogger("lm-eval")
16
+
17
+ T = TypeVar("T", bound="LM")
18
+
19
+
20
+ class LM(abc.ABC):
21
+ def __init__(self) -> None:
22
+ """Defines the interface that should be implemented by all LM subclasses.
23
+ LMs are assumed to take text (strings) as input and yield strings as output
24
+ (inputs/outputs should be tokenization-agnostic.)
25
+
26
+ """
27
+ # set rank and world size to a single process, by default.
28
+ self._rank = 0
29
+ self._world_size = 1
30
+ self.cache_hook = CacheHook(None)
31
+
32
+ @abc.abstractmethod
33
+ def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
34
+ """Compute log-likelihood of generating a continuation from a context.
35
+ Downstream tasks should attempt to use loglikelihood instead of other
36
+ LM calls whenever possible.
37
+
38
+ :param requests: list[Instance]
39
+ A list of Instance objects, with property `args` which returns a tuple (context, continuation).
40
+ `context: str`
41
+ Context string. Implementations of LM must be able to handle an
42
+ empty context string.
43
+ `continuation: str`
44
+ The continuation over which log likelihood will be calculated. If
45
+ there is a word boundary, the space should be in the continuation.
46
+ For example, context="hello" continuation=" world" is correct.
47
+
48
+ :return: list[tuple[float, bool]]
49
+ A list of pairs (logprob, isgreedy)
50
+ `logprob: float`
51
+ The log probability of `continuation`.
52
+ `isgreedy`:
53
+ Whether `continuation` would be generated by greedy sampling from `context`.
54
+ """
55
+ pass
56
+
57
+ @abc.abstractmethod
58
+ def loglikelihood_rolling(self, requests) -> List[Tuple[float]]:
59
+ """Compute full log-likelihood of a string, with no truncation, for perplexity computation
60
+ - We will use the full max context length of the model.
61
+ - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
62
+ the max context length.
63
+ - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
64
+ which may simply concatenate multiple documents together.
65
+ - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
66
 + multiple chunks, the last input will still be a full-sized context.
67
+ Example:
68
+ Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
69
+ Prefix: BOS/EOS
70
+ Max context length: 4
71
+ Resulting input/prediction pairs:
72
+
73
+ INPUT: BOS 0 1 2
74
+ PRED: 0 1 2 3
75
+
76
+ INPUT: 3 4 5 6
77
+ PRED: 4 5 6 7
78
+
79
+ INPUT: 5 6 7 8
80
+ PRED: 8 9
81
+
82
+ Observe that:
83
+ 1. Each token is predicted exactly once
84
+ 2. For the last pair, we provide the full context, but only score the last two tokens
85
+
86
+ :param requests: list[Instance]
87
+ A list of Instance objects with property `args` which returns a tuple (context,).
88
+ string: str
89
+ String for which we are computing overall loglikelihood
90
+ :return: list[tuple[float]]
91
+ A list of tuples (logprob,)
92
+ logprob: float
93
+ The log probability of `context` conditioned on the BOS/EOS token.
94
+ Can also be overridden for custom cases by `prefix_token_id`.
95
+ """
96
+ pass
97
+
98
+ # TODO: Add an optional max length
99
+ @abc.abstractmethod
100
+ def generate_until(self, requests) -> List[str]:
101
+ """Generate greedily until a stopping sequence
102
+
103
+ :param requests: list[Instance]
104
+ A list of Instance objects with property `args` which returns a tuple (context, until).
105
+ context: str
106
+ Context string
107
+ until: [str]
108
+ The string sequences to generate until. These string sequences
109
+ may each span across multiple tokens, or may be part of one token.
110
+ :return: list[str]
111
+ A list of strings continuation
112
+ continuation: str
113
+ The generated continuation.
114
+ """
115
+ pass
116
+
117
+ @classmethod
118
+ def create_from_arg_string(
119
+ cls: Type[T], arg_string: str, additional_config: Optional[dict] = None
120
+ ) -> T:
121
+ """
122
+ Creates an instance of the LM class using the given argument string and additional config.
123
+
124
+ Parameters:
125
+ - arg_string: A string containing arguments in the format key1=value1,key2=value2.
126
+ - additional_config: Optional dictionary containing additional configuration parameters.
127
+
128
+ Returns:
129
+ - Instance of the LM class.
130
+ """
131
+ additional_config = {} if additional_config is None else additional_config
132
+ args = utils.simple_parse_args_string(arg_string)
133
+ args2 = {k: v for k, v in additional_config.items() if v is not None}
134
+ return cls(**args, **args2)
135
+
136
+ @classmethod
137
+ def create_from_arg_obj(
138
+ cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None
139
+ ) -> T:
140
+ """
141
+ Creates an instance of the LM class using the given arg_obj
142
+
143
+ Parameters:
144
+ - arg_obj: A dict containing arguments in the format key1=value1,key2=value2.
145
+ - additional_config: Optional dictionary containing additional configuration parameters.
146
+
147
+ Returns:
148
+ - Instance of the LM class.
149
+ """
150
+
151
+ additional_config = {} if additional_config is None else additional_config
152
+ additional_config = {
153
+ k: v for k, v in additional_config.items() if v is not None
154
+ }
155
+
156
+ return cls(**arg_dict, **additional_config)
157
+
158
+ @property
159
+ def rank(self):
160
+ # used in the case of parallelism. Hardcoded to
161
+ # ensure no errors arise using API models which do
162
+ # not support multi-device parallelism nor expect it.
163
+ return self._rank
164
+
165
+ @property
166
+ def world_size(self):
167
+ # used in the case of parallelism. Hardcoded to
168
+ # ensure no errors arise using API models which do
169
+ # not support multi-device parallelism nor expect it.
170
+ return self._world_size
171
+
172
+ def set_cache_hook(self, cache_hook) -> None:
173
+ self.cache_hook = cache_hook
174
+
175
+
176
+ ### SQLite-based caching of LM responses
177
+ def hash_args(attr, args):
178
+ dat = json.dumps([attr] + list(args))
179
+ return hashlib.sha256(dat.encode("utf-8")).hexdigest()
180
+
181
+
182
+ class CacheHook:
183
+ def __init__(self, cachinglm) -> None:
184
+ if cachinglm is None:
185
+ self.dbdict = None
186
+ return
187
+
188
+ self.dbdict = cachinglm.dbdict
189
+
190
+ def add_partial(self, attr, req, res) -> None:
191
+ if self.dbdict is None:
192
+ return
193
+ hsh = hash_args(attr, req)
194
+ self.dbdict[hsh] = res
195
+
196
+
197
+ class CachingLM:
198
+ def __init__(self, lm, cache_db) -> None:
199
+ """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.
200
+
201
+ :param lm: LM
202
+ Underlying LM
203
+ :param cache_db: str
204
+ Path to cache db
205
+ """
206
+ self.lm = lm
207
+ self.cache_db = cache_db
208
+ if os.path.dirname(cache_db):
209
+ os.makedirs(os.path.dirname(cache_db), exist_ok=True)
210
+ self.dbdict = SqliteDict(cache_db, autocommit=True)
211
+
212
+ # add hook to lm
213
+ lm.set_cache_hook(self.get_cache_hook())
214
+
215
+ def __getattr__(self, attr):
216
+ lm_attr = getattr(self.lm, attr)
217
+ if not callable(lm_attr):
218
+ return lm_attr
219
+
220
+ def fn(requests):
221
+ res = []
222
+ remaining_reqs = []
223
+ warned = False
224
+ # figure out which ones are cached and which ones are new
225
+ eval_logger.info(
226
+ f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
227
+ )
228
+ for req in tqdm(requests, desc="Checking cached requests"):
229
+ hsh = hash_args(attr, req.args)
230
+ if attr == "generate_until" and req.args[1].get("do_sample", False):
231
+ # when we are doing non-greedy generation, don't use the cache
232
+ # (else every "randomly sampled" generation would be identical for repeats > 1).
233
+ if not warned:
234
+ eval_logger.warning(
235
+ f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
236
+ )
237
+ warned = True
238
+ res.append(None)
239
+ remaining_reqs.append(req)
240
+ elif hsh in self.dbdict:
241
+ ob = self.dbdict[hsh]
242
+
243
+ assert ob is not None
244
+
245
+ res.append(ob)
246
+ else:
247
+ res.append(None)
248
+ remaining_reqs.append(req)
249
+ eval_logger.info(
250
+ f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
251
+ )
252
+ # actually run the LM on the requests that do not have cached results
253
+ rem_res = getattr(self.lm, attr)(remaining_reqs)
254
+
255
+ # stick the new ones back into the list and also cache any of the new ones
256
+ resptr = 0
257
+ for req, r in zip(remaining_reqs, rem_res):
258
+ while res[resptr] is not None:
259
+ resptr += 1
260
+
261
+ res[resptr] = r
262
+
263
+ # caching
264
+ hsh = hash_args(attr, req.args)
265
+ self.dbdict[hsh] = r
266
+ self.dbdict.commit()
267
+
268
+ return res
269
+
270
+ return fn
271
+
272
+ def get_cache_hook(self):
273
+ return CacheHook(self)
274
+
275
+
276
+ class TemplateLM(LM):
277
+ """
278
+ A class acting as intermediary between the LM base class
279
+ and boilerplate often included in other LM subclasses.
280
+ """
281
+
282
+ @property
283
+ @abc.abstractmethod
284
+ def eot_token_id(self):
285
+ pass
286
+
287
+ @property
288
+ def prefix_token_id(self):
289
+ # it is used as prefix for loglikelihood
290
+ return self.eot_token_id
291
+
292
+ @abc.abstractmethod
293
+ def tok_encode(self, string: str, **kwargs):
294
+ pass
295
+
296
+ @abc.abstractmethod
297
+ def _loglikelihood_tokens(self, requests, **kwargs):
298
+ pass
299
+
300
+ def _encode_pair(self, context, continuation):
301
+ n_spaces = len(context) - len(context.rstrip())
302
+ if n_spaces > 0:
303
+ continuation = context[-n_spaces:] + continuation
304
+ context = context[:-n_spaces]
305
+
306
+ model_class = getattr(self, "AUTO_MODEL_CLASS", None)
307
+
308
+ if model_class == transformers.AutoModelForSeq2SeqLM:
309
+ context_enc = self.tok_encode(context)
310
+ continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
311
+ else:
312
+ whole_enc = self.tok_encode(context + continuation)
313
+ context_enc = self.tok_encode(context)
314
+
315
+ context_enc_len = len(context_enc)
316
+ continuation_enc = whole_enc[context_enc_len:]
317
+
318
+ return context_enc, continuation_enc
319
+
320
+ def loglikelihood(
321
+ self, requests, disable_tqdm: bool = False
322
+ ) -> List[Tuple[float, bool]]:
323
+ new_reqs = []
324
+ for context, continuation in [req.args for req in requests]:
325
+ if context == "":
326
+ # BOS or EOS as context
327
+ context_enc, continuation_enc = (
328
+ [self.prefix_token_id],
329
+ self.tok_encode(continuation),
330
+ )
331
+ else:
332
+ context_enc, continuation_enc = self._encode_pair(context, continuation)
333
+
334
+ new_reqs.append(((context, continuation), context_enc, continuation_enc))
335
+
336
+ return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)
337
+
338
+ @abc.abstractmethod
339
+ def loglikelihood_rolling(
340
+ self, requests, disable_tqdm: bool = False
341
+ ) -> List[Tuple[float, bool]]:
342
+ pass
343
+
344
+ @abc.abstractmethod
345
+ def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
346
+ pass
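
The abstract LM interface above can be exercised with a stub model; the sketch below (illustration only, not part of this commit) returns constant scores, a stub in the spirit of the dummy model (lm_eval/models/dummy.py) also added in this commit, and wraps it in CachingLM. The class name and cache path are made up.

from typing import List, Tuple

from lm_eval.api.model import LM, CachingLM


class ConstantLM(LM):
    """Returns fixed values for every request; useful only for plumbing tests."""

    def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
        # one (logprob, is_greedy) pair per (context, continuation) request
        return [(-1.0, False) for _ in requests]

    def loglikelihood_rolling(self, requests) -> List[Tuple[float]]:
        return [(-10.0,) for _ in requests]

    def generate_until(self, requests) -> List[str]:
        return ["yes" for _ in requests]


# SQLite-backed caching: repeated identical requests are served from the db
lm = CachingLM(ConstantLM(), "lm_cache/constant_lm.db")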
lm-evaluation-harness/lm_eval/api/registry.py ADDED
@@ -0,0 +1,172 @@
1
+ import logging
2
+ from typing import Callable, Dict
3
+
4
+ import evaluate as hf_evaluate
5
+
6
+ from lm_eval.api.model import LM
7
+
8
+
9
+ eval_logger = logging.getLogger("lm-eval")
10
+
11
+ MODEL_REGISTRY = {}
12
+
13
+
14
+ def register_model(*names):
15
+ # either pass a list or a single alias.
16
+ # function receives them as a tuple of strings
17
+
18
+ def decorate(cls):
19
+ for name in names:
20
+ assert issubclass(
21
+ cls, LM
22
+ ), f"Model '{name}' ({cls.__name__}) must extend LM class"
23
+
24
+ assert (
25
+ name not in MODEL_REGISTRY
26
+ ), f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead."
27
+
28
+ MODEL_REGISTRY[name] = cls
29
+ return cls
30
+
31
+ return decorate
32
+
33
+
34
+ def get_model(model_name):
35
+ try:
36
+ return MODEL_REGISTRY[model_name]
37
+ except KeyError:
38
+ raise ValueError(
39
+ f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {', '.join(MODEL_REGISTRY.keys())}"
40
+ )
41
+
42
+
43
+ TASK_REGISTRY = {}
44
+ GROUP_REGISTRY = {}
45
+ ALL_TASKS = set()
46
+ func2task_index = {}
47
+
48
+
49
+ def register_task(name):
50
+ def decorate(fn):
51
+ assert (
52
+ name not in TASK_REGISTRY
53
+ ), f"task named '{name}' conflicts with existing registered task!"
54
+
55
+ TASK_REGISTRY[name] = fn
56
+ ALL_TASKS.add(name)
57
+ func2task_index[fn.__name__] = name
58
+ return fn
59
+
60
+ return decorate
61
+
62
+
63
+ def register_group(name):
64
+ def decorate(fn):
65
+ func_name = func2task_index[fn.__name__]
66
+ if name in GROUP_REGISTRY:
67
+ GROUP_REGISTRY[name].append(func_name)
68
+ else:
69
+ GROUP_REGISTRY[name] = [func_name]
70
+ ALL_TASKS.add(name)
71
+ return fn
72
+
73
+ return decorate
74
+
75
+
76
+ OUTPUT_TYPE_REGISTRY = {}
77
+ METRIC_REGISTRY = {}
78
+ METRIC_AGGREGATION_REGISTRY = {}
79
+ AGGREGATION_REGISTRY: Dict[str, Callable[[], Dict[str, Callable]]] = {}
80
+ HIGHER_IS_BETTER_REGISTRY = {}
81
+
82
+ DEFAULT_METRIC_REGISTRY = {
83
+ "loglikelihood": [
84
+ "perplexity",
85
+ "acc",
86
+ ],
87
+ "loglikelihood_rolling": ["word_perplexity", "byte_perplexity", "bits_per_byte"],
88
+ "multiple_choice": ["acc", "acc_norm"],
89
+ "generate_until": ["exact_match"],
90
+ }
91
+
92
+
93
+ def register_metric(**args):
94
+ # TODO: do we want to enforce a certain interface to registered metrics?
95
+ def decorate(fn):
96
+ assert "metric" in args
97
+ name = args["metric"]
98
+
99
+ for key, registry in [
100
+ ("metric", METRIC_REGISTRY),
101
+ ("higher_is_better", HIGHER_IS_BETTER_REGISTRY),
102
+ ("aggregation", METRIC_AGGREGATION_REGISTRY),
103
+ ]:
104
+ if key in args:
105
+ value = args[key]
106
+ assert (
107
+ value not in registry
108
+ ), f"{key} named '{value}' conflicts with existing registered {key}!"
109
+
110
+ if key == "metric":
111
+ registry[name] = fn
112
+ elif key == "aggregation":
113
+ registry[name] = AGGREGATION_REGISTRY[value]
114
+ else:
115
+ registry[name] = value
116
+
117
+ return fn
118
+
119
+ return decorate
120
+
121
+
122
+ def get_metric(name: str, hf_evaluate_metric=False) -> Callable:
123
+ if not hf_evaluate_metric:
124
+ if name in METRIC_REGISTRY:
125
+ return METRIC_REGISTRY[name]
126
+ else:
127
+ eval_logger.warning(
128
+ f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
129
+ )
130
+
131
+ try:
132
+ metric_object = hf_evaluate.load(name)
133
+ return metric_object.compute
134
+ except Exception:
135
+ eval_logger.error(
136
+ f"{name} not found in the evaluate library! Please check https://huggingface.co/evaluate-metric",
137
+ )
138
+
139
+
140
+ def register_aggregation(name: str):
141
+ def decorate(fn):
142
+ assert (
143
+ name not in AGGREGATION_REGISTRY
144
+ ), f"aggregation named '{name}' conflicts with existing registered aggregation!"
145
+
146
+ AGGREGATION_REGISTRY[name] = fn
147
+ return fn
148
+
149
+ return decorate
150
+
151
+
152
+ def get_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
153
+ try:
154
+ return AGGREGATION_REGISTRY[name]
155
+ except KeyError:
156
+ eval_logger.warning(f"{name} not a registered aggregation metric!")
157
+
158
+
159
+ def get_metric_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
160
+ try:
161
+ return METRIC_AGGREGATION_REGISTRY[name]
162
+ except KeyError:
163
+ eval_logger.warning(f"{name} metric is not assigned a default aggregation!")
164
+
165
+
166
+ def is_higher_better(metric_name) -> bool:
167
+ try:
168
+ return HIGHER_IS_BETTER_REGISTRY[metric_name]
169
+ except KeyError:
170
+ eval_logger.warning(
171
+ f"higher_is_better not specified for metric '{metric_name}'!"
172
+ )
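
As a sketch of how these registries are meant to be extended (illustration only, not part of this commit; the names geometric_mean and my_score are invented):

import math

from lm_eval.api.registry import (
    get_metric,
    get_metric_aggregation,
    register_aggregation,
    register_metric,
)


@register_aggregation("geometric_mean")
def geometric_mean(arr):
    return math.exp(sum(math.log(x) for x in arr) / len(arr))


@register_metric(
    metric="my_score",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="geometric_mean",  # must already be a registered aggregation
)
def my_score_fn(items):  # passthrough, like most built-in metric fns
    return items


assert get_metric("my_score") is my_score_fn
assert get_metric_aggregation("my_score") is geometric_mean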
lm-evaluation-harness/lm_eval/api/samplers.py ADDED
@@ -0,0 +1,114 @@
1
+ class ContextSampler:
2
+ def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None:
3
+ self.rnd = rnd
4
+ assert self.rnd, "must pass rnd to FewShotSampler!"
5
+
6
+ self.task = task
7
+ self.config = task._config
8
+
9
+ self.target_delimiter = self.config.target_delimiter
10
+ self.fewshot_delimiter = self.config.fewshot_delimiter
11
+
12
+ self.doc_to_text = self.task.doc_to_text
13
+ self.doc_to_target = self.task.doc_to_target
14
+ self.doc_to_choice = self.task.doc_to_choice
15
+
16
+ self.docs = docs # HF dataset split, provided by task._fewshot_docs()
17
+ if fewshot_indices: # subset few-shot docs from
18
+ self.docs = self.docs.select(fewshot_indices)
19
+
20
+ def get_context(self, doc, num_fewshot):
21
+ # draw an extra fewshot sample if using same split as evaluating on
22
+ n_samples = (
23
+ num_fewshot + 1
24
+ if self.config.fewshot_split == self.config.test_split
25
+ else num_fewshot
26
+ )
27
+
28
+ # draw `n_samples` docs from fewshot_docs
29
+ fewshotex = self.sample(n_samples)
30
+
31
+ # get rid of the doc that's the one we're evaluating, if it's in the fewshot
32
+ # TODO: should we just stop people from using fewshot from same split as evaluating?
33
+ selected_docs = [x for x in fewshotex if x != doc][:num_fewshot]
34
+
35
+ labeled_examples = (
36
+ self.fewshot_delimiter.join(
37
+ [
38
+ # TODO: is separating doc_to_text and doc_to_target by one space always desired?
39
+ (
40
+ self.doc_to_text(doc)
41
+ if (
42
+ self.config.doc_to_choice is None
43
+ or isinstance(self.doc_to_text(doc), str)
44
+ )
45
+ else self.doc_to_choice(doc)[self.doc_to_text(doc)]
46
+ )
47
+ + self.target_delimiter
48
+ + (
49
+ str(self.doc_to_target(doc)[0])
50
+ if isinstance(self.doc_to_target(doc), list)
51
+ else self.doc_to_target(doc)
52
+ if (
53
+ self.config.doc_to_choice is None
54
+ or isinstance(self.doc_to_target(doc), str)
55
+ )
56
+ else str(self.doc_to_choice(doc)[self.doc_to_target(doc)])
57
+ )
58
+ for doc in selected_docs
59
+ ]
60
+ )
61
+ + self.fewshot_delimiter
62
+ )
63
+
64
+ return labeled_examples
65
+
66
+ def sample(self, n):
67
+ """
68
+ Draw `n` samples from our fewshot docs. This method should be overridden by subclasses.
69
+ """
70
+
71
+ return self.rnd.sample(self.docs, n)
72
+
73
+
74
+ class FirstNSampler(ContextSampler):
75
+ def sample(self, n) -> None:
76
+ """
77
+ Draw the first `n` samples in order from the specified split.
78
+ Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU.
79
+ """
80
+ assert (
81
+ n <= len(self.docs)
82
+ ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available."
83
+ return self.docs[:n]
84
+
85
+
86
+ class BalancedSampler(ContextSampler):
87
+ def sample(self, n) -> None:
88
+ """
89
+ TODO: this should return approximately class-balanced samples from our fewshot examples.
90
+ TODO: what order should they be in? maybe random?
91
+ """
92
+
93
+ pass
94
+
95
+
96
+ class ManualSampler(ContextSampler):
97
+ def sample(self, n) -> None:
98
+ """ """
99
+ pass
100
+
101
+
102
+ SAMPLER_REGISTRY = {
103
+ "default": ContextSampler,
104
+ "first_n": FirstNSampler,
105
+ }
106
+
107
+
108
+ def get_sampler(name):
109
+ try:
110
+ return SAMPLER_REGISTRY[name]
111
+ except KeyError:
112
+ raise ValueError(
113
+ f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}"
114
+ )
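
A quick sketch of the sampler registry above (illustration only, not part of this commit):

from lm_eval.api.samplers import ContextSampler, FirstNSampler, get_sampler

assert get_sampler("default") is ContextSampler
assert get_sampler("first_n") is FirstNSampler

try:
    get_sampler("balanced")  # BalancedSampler is not registered in SAMPLER_REGISTRY
except ValueError as err:
    print(err)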
lm-evaluation-harness/lm_eval/api/task.py ADDED
@@ -0,0 +1,1498 @@
1
+ import abc
2
+ import ast
3
+ import logging
4
+ import random
5
+ import re
6
+ from collections.abc import Callable
7
+ from copy import deepcopy
8
+ from dataclasses import asdict, dataclass
9
+ from inspect import getsource
10
+ from typing import (
11
+ Any,
12
+ Dict,
13
+ Iterable,
14
+ Iterator,
15
+ List,
16
+ Literal,
17
+ Mapping,
18
+ Optional,
19
+ Tuple,
20
+ Union,
21
+ )
22
+
23
+ import datasets
24
+ import numpy as np
25
+ from tqdm import tqdm
26
+
27
+ from lm_eval import utils
28
+ from lm_eval.api import samplers
29
+ from lm_eval.api.instance import Instance, OutputType
30
+ from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
31
+ from lm_eval.api.registry import (
32
+ AGGREGATION_REGISTRY,
33
+ DEFAULT_METRIC_REGISTRY,
34
+ get_aggregation,
35
+ get_metric,
36
+ get_metric_aggregation,
37
+ is_higher_better,
38
+ )
39
+ from lm_eval.caching.cache import load_from_cache, save_to_cache
40
+ from lm_eval.filters import build_filter_ensemble
41
+ from lm_eval.prompts import get_prompt
42
+
43
+
44
+ ALL_OUTPUT_TYPES = [
45
+ "loglikelihood",
46
+ "multiple_choice",
47
+ "loglikelihood_rolling",
48
+ "generate_until",
49
+ ]
50
+
51
+ eval_logger = logging.getLogger("lm-eval")
52
+
53
+
54
+ @dataclass
55
+ class TaskConfig(dict):
56
+ # task naming/registry
57
+ task: Optional[str] = None
58
+ task_alias: Optional[str] = None
59
+ group: Optional[Union[str, list]] = None
60
+ group_alias: Optional[Union[str, list]] = None
61
+ # HF dataset options.
62
+ # which dataset to use,
63
+ # and what splits for what purpose
64
+ dataset_path: Optional[str] = None
65
+ dataset_name: Optional[str] = None
66
+ dataset_kwargs: Optional[dict] = None
67
+ training_split: Optional[str] = None
68
+ validation_split: Optional[str] = None
69
+ test_split: Optional[str] = None
70
+ fewshot_split: Optional[
71
+ str
72
+ ] = None # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?)
73
+ # formatting / prompting options.
74
+ # see docs/advanced_task_guide.md for more info
75
+ process_docs: Optional[Callable] = None
76
+ doc_to_text: Optional[Union[Callable, str]] = None
77
+ doc_to_target: Optional[Union[Callable, str]] = None
78
+ doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
79
+ process_results: Optional[Union[Callable, str]] = None
80
+ use_prompt: Optional[str] = None
81
+ description: str = ""
82
+ target_delimiter: str = " "
83
+ fewshot_delimiter: str = "\n\n"
84
+ fewshot_config: Optional[dict] = None
85
+ # runtime configuration options
86
+ num_fewshot: Optional[int] = None
87
+ # scoring options
88
+ metric_list: Optional[list] = None
89
+ output_type: OutputType = "generate_until"
90
+ generation_kwargs: Optional[dict] = None
91
+ repeats: int = 1
92
+ filter_list: Optional[Union[str, list]] = None
93
+ should_decontaminate: bool = False
94
+ doc_to_decontamination_query: Optional[str] = None
95
+ metadata: Optional[
96
+ dict
97
+ ] = None # by default, not used in the code. allows for users to pass arbitrary info to tasks
98
+
99
+ def __post_init__(self) -> None:
100
+ if self.generation_kwargs is not None:
101
+ if self.output_type != "generate_until":
102
+ eval_logger.warning(
103
+ f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
104
+ )
105
+
106
+ if "temperature" in self.generation_kwargs:
107
+ self.generation_kwargs["temperature"] = float(
108
+ self.generation_kwargs["temperature"]
109
+ )
110
+
111
+ if "until" not in self.generation_kwargs:
112
+ self.generation_kwargs["until"] = [self.fewshot_delimiter]
113
+ else:
114
+ if self.output_type == "generate_until":
115
+ # ensure that we greedily generate in absence of explicit arguments otherwise
116
+ self.generation_kwargs = {
117
+ "until": (
118
+ None
119
+ if self.fewshot_delimiter is None
120
+ else [self.fewshot_delimiter]
121
+ ),
122
+ "do_sample": False,
123
+ }
124
+
125
+ def __getitem__(self, item):
126
+ return getattr(self, item)
127
+
128
+ def __setitem__(self, item, value):
129
+ return setattr(self, item, value)
130
+
131
+ def to_dict(self, keep_callable: bool = False) -> dict:
132
+ """dumps the current config as a dictionary object, as a printable format.
133
+ null fields will not be printed.
134
+ Used for dumping results alongside full task configuration
135
+
136
+ :return: dict
137
+ A printable dictionary version of the TaskConfig object.
138
+
139
+ # TODO: should any default value in the TaskConfig not be printed?
140
+ """
141
+ cfg_dict = asdict(self)
142
+ # remove values that are `None`
143
+ for k, v in list(cfg_dict.items()):
144
+ if v is None:
145
+ cfg_dict.pop(k)
146
+ elif k == "metric_list":
147
+ for metric_dict in v:
148
+ for metric_key, metric_value in metric_dict.items():
149
+ if callable(metric_value):
150
+ metric_dict[metric_key] = self.serialize_function(
151
+ metric_value, keep_callable=keep_callable
152
+ )
153
+ cfg_dict[k] = v
154
+ elif callable(v):
155
+ cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
156
+ return cfg_dict
157
+
158
+ def serialize_function(
159
+ self, value: Union[Callable, str], keep_callable=False
160
+ ) -> Union[Callable, str]:
161
+ """Serializes a given function or string.
162
+
163
+ If 'keep_callable' is True, the original callable is returned.
164
+ Otherwise, attempts to return the source code of the callable using 'getsource'.
165
+ """
166
+ if keep_callable:
167
+ return value
168
+ else:
169
+ try:
170
+ return getsource(value)
171
+ except (TypeError, OSError):
172
+ return str(value)
173
+
174
+
175
+ class Task(abc.ABC):
176
+ """A task represents an entire benchmark including its dataset, problems,
177
+ answers, and evaluation methods. See BoolQ for a simple example implementation
178
+
179
+ A `doc` can be any python object which represents one instance of evaluation.
180
+ This is usually a dictionary e.g.
181
+ {"question": ..., "answer": ...} or
182
+ {"question": ..., question, answer)
183
+ """
184
+
185
+ VERSION: Optional[Union[int, str]] = None
186
+
187
+ # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
188
+ # or a path to a custom `datasets` loading script.
189
+ DATASET_PATH: Optional[str] = None
190
+
191
+ # The name of a subset within `DATASET_PATH`.
192
+ DATASET_NAME: Optional[str] = None
193
+
194
+ OUTPUT_TYPE: Optional[OutputType] = None
195
+
196
+ def __init__(
197
+ self,
198
+ data_dir: Optional[str] = None,
199
+ cache_dir: Optional[str] = None,
200
+ download_mode: Optional[datasets.DownloadMode] = None,
201
+ config: Optional[Mapping] = None, # Union[dict, TaskConfig]
202
+ ) -> None:
203
+ """
204
+ :param data_dir: str
205
+ Stores the path to a local folder containing the `Task`'s data files.
206
+ Use this to specify the path to manually downloaded data (usually when
207
+ the dataset is not publicly accessible).
208
+ :param cache_dir: str
209
+ The directory to read/write the `Task` dataset. This follows the
210
+ HuggingFace `datasets` API with the default cache directory located at:
211
+ `~/.cache/huggingface/datasets`
212
+ NOTE: You can change the cache location globally for a given process
213
+ to another directory:
214
+ `export HF_DATASETS_CACHE="/path/to/another/directory"`
215
+ :param download_mode: datasets.DownloadMode
216
+ How to treat pre-existing `Task` downloads and data.
217
+ - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
218
+ Reuse download and reuse dataset.
219
+ - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
220
+ Reuse download with fresh dataset.
221
+ - `datasets.DownloadMode.FORCE_REDOWNLOAD`
222
+ Fresh download and fresh dataset.
223
+ """
224
+ self.download(data_dir, cache_dir, download_mode)
225
+ self._training_docs: Optional[list] = None
226
+ self._fewshot_docs: Optional[list] = None
227
+ self._instances: Optional[List[Instance]] = None
228
+
229
+ self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()
230
+
231
+ self._filters = [build_filter_ensemble("none", [["take_first", None]])]
232
+
233
+ def download(
234
+ self,
235
+ data_dir: Optional[str] = None,
236
+ cache_dir: Optional[str] = None,
237
+ download_mode=None,
238
+ ) -> None:
239
+ """Downloads and returns the task dataset.
240
+ Override this method to download the dataset from a custom API.
241
+
242
+ :param data_dir: str
243
+ Stores the path to a local folder containing the `Task`'s data files.
244
+ Use this to specify the path to manually downloaded data (usually when
245
+ the dataset is not publicly accessible).
246
+ :param cache_dir: str
247
+ The directory to read/write the `Task` dataset. This follows the
248
+ HuggingFace `datasets` API with the default cache directory located at:
249
+ `~/.cache/huggingface/datasets`
250
+ NOTE: You can change the cache location globally for a given process
251
+ by setting the shell environment variable, `HF_DATASETS_CACHE`,
252
+ to another directory:
253
+ `export HF_DATASETS_CACHE="/path/to/another/directory"`
254
+ :param download_mode: datasets.DownloadMode
255
+ How to treat pre-existing `Task` downloads and data.
256
+ - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
257
+ Reuse download and reuse dataset.
258
+ - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
259
+ Reuse download with fresh dataset.
260
+ - `datasets.DownloadMode.FORCE_REDOWNLOAD`
261
+ Fresh download and fresh dataset.
262
+ """
263
+ self.dataset = datasets.load_dataset(
264
+ path=self.DATASET_PATH,
265
+ name=self.DATASET_NAME,
266
+ data_dir=data_dir,
267
+ cache_dir=cache_dir,
268
+ download_mode=download_mode,
269
+ )
270
+
271
+ @property
272
+ def config(self) -> TaskConfig:
273
+ """Returns the TaskConfig associated with this class."""
274
+ return self._config
275
+
276
+ @abc.abstractmethod
277
+ def has_training_docs(self):
278
+ """Whether the task has a training set"""
279
+ pass
280
+
281
+ @abc.abstractmethod
282
+ def has_validation_docs(self):
283
+ """Whether the task has a validation set"""
284
+ pass
285
+
286
+ @abc.abstractmethod
287
+ def has_test_docs(self):
288
+ """Whether the task has a test set"""
289
+ pass
290
+
291
+ def training_docs(self) -> Iterable:
292
+ """
293
+ :return: Iterable[obj]
294
+ An iterable of any object that doc_to_text can handle
295
+ """
296
+ return []
297
+
298
+ def validation_docs(self) -> Iterable:
299
+ """
300
+ :return: Iterable[obj]
301
+ An iterable of any object that doc_to_text can handle
302
+ """
303
+ return []
304
+
305
+ def test_docs(self) -> Iterable:
306
+ """
307
+ :return: Iterable[obj]
308
+ An iterable of any object that doc_to_text can handle
309
+ """
310
+ return []
311
+
312
+ def fewshot_docs(self) -> Iterable:
313
+ """
314
+ :return: Iterable[obj]
315
+ An iterable of any object that doc_to_text can handle
316
+ """
317
+ if self.has_training_docs():
318
+ return self.training_docs()
319
+ elif self.has_validation_docs():
320
+ return self.validation_docs()
321
+ else:
322
+ eval_logger.warning(
323
+ f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
324
+ ", using test_docs as fewshot_docs but this is not recommended."
325
+ )
326
+ return self.test_docs()
327
+
328
+ def _process_doc(self, doc: dict) -> dict:
329
+ """
330
+ Override this to process (detokenize, strip, replace, etc.) individual
331
+ documents. This can be used in a map over documents of a data split.
332
+ E.g. `map(self._process_doc, self.dataset["validation"])`
333
+
334
+ :return: dict
335
+ The processed version of the specified `doc`.
336
+ """
337
+ return doc
338
+
339
+ @property
340
+ def instances(self) -> List[Instance]:
341
+ """After calling `task.build_all_requests()`, tasks
342
+ maintain a list of the dataset instances which will be evaluated.
343
+ """
344
+ return self._instances
345
+
346
+ def fewshot_examples(self, k, rnd):
347
+ if self._training_docs is None:
348
+ self._training_docs = list(self.training_docs())
349
+
350
+ return rnd.sample(self._training_docs, k)
351
+
352
+ def doc_to_decontamination_query(self, doc):
353
+ raise NotImplementedError(
354
+ "Override doc_to_decontamination_query with document specific decontamination query."
355
+ )
356
+
357
+ @abc.abstractmethod
358
+ def doc_to_text(self, doc):
359
+ pass
360
+
361
+ @abc.abstractmethod
362
+ def doc_to_target(self, doc):
363
+ pass
364
+
365
+ def build_all_requests(
366
+ self,
367
+ *,
368
+ limit=None,
369
+ rank=None,
370
+ world_size=None,
371
+ cache_requests=False,
372
+ rewrite_requests_cache=False,
373
+ ) -> None:
374
+ """Build a set of Instances for a task, and store them in task.instances"""
375
+
376
+ # used with caching
377
+ og_limit = limit
378
+
379
+ cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
380
+
381
+ cached_instances = load_from_cache(file_name=cache_key)
382
+
383
+ if cache_requests and cached_instances and not rewrite_requests_cache:
384
+ cached_instances = cached_instances[:limit]
385
+
386
+ flattened_instances = [
387
+ instance
388
+ for instance_group in cached_instances
389
+ for instance in instance_group
390
+ ]
391
+
392
+ self._instances = flattened_instances
393
+ return
394
+
395
+ eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")
396
+
397
+ instances = []
398
+
399
+ # process all documents when caching is specified for simplicity
400
+ if (
401
+ cache_requests
402
+ and (not cached_instances or rewrite_requests_cache)
403
+ and limit is not None
404
+ ):
405
+ limit = None
406
+
407
+ doc_id_docs = list(
408
+ self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
409
+ )
410
+
411
+ num_docs = len(doc_id_docs)
412
+
413
+ for doc_id, doc in tqdm(
414
+ doc_id_docs,
415
+ total=num_docs,
416
+ ):
417
+ # sample fewshot context #TODO: need to offset doc_id by rank now!
418
+ fewshot_ctx = self.fewshot_context(
419
+ doc,
420
+ 0 if self.config.num_fewshot is None else self.config.num_fewshot,
421
+ )
422
+
423
+ # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
424
+ inst = self.construct_requests(
425
+ doc=doc,
426
+ ctx=fewshot_ctx,
427
+ metadata=(self.config["task"], doc_id, self.config.repeats),
428
+ )
429
+
430
+ if not isinstance(inst, list):
431
+ inst = [inst]
432
+
433
+ instances.append(inst)
434
+
435
+ # now flatten, this is to allow slicing to work with pickles
436
+
437
+ sliced_instances = instances[:og_limit]
438
+
439
+ flattened_instances = [
440
+ instance
441
+ for instance_group in sliced_instances
442
+ for instance in instance_group
443
+ ]
444
+
445
+ self._instances = flattened_instances
446
+
447
+ if len(self._instances) == 0:
448
+ raise ValueError("task.build_requests() did not find any docs!")
449
+
450
+ if cache_requests and (not cached_instances or rewrite_requests_cache):
451
+ save_to_cache(file_name=cache_key, obj=instances)
452
+
453
+ @abc.abstractmethod
454
+ def construct_requests(self, doc, ctx, **kwargs):
455
+ """Uses RequestFactory to construct Requests and returns an iterable of
456
+ Requests which will be sent to the LM.
457
+
458
+ :param doc:
459
+ The document as returned from training_docs, validation_docs, or test_docs.
460
+ :param ctx: str
461
+ The context string, generated by fewshot_context. This includes the natural
462
+ language description, as well as the few shot examples, and the question
463
+ part of the document for `doc`.
464
+ :param doc_idx: int
465
+ The index of a document within `self.test_docs()` or `self.validation_docs()`,
466
+ whichever is the main split used.
467
+ :param repeats: int
468
+ TODO: update this docstring
469
+ The number of times each instance in a dataset is inferred on. Defaults to 1,
470
+ can be increased for techniques like majority voting.
471
+ """
472
+ pass
473
+
474
+ @abc.abstractmethod
475
+ def process_results(self, doc, results):
476
+ """Take a single document and the LM results and evaluates, returning a
477
+ dict where keys are the names of submetrics and values are the values of
478
+ the metric for that one document
479
+
480
+ :param doc:
481
+ The document as returned from training_docs, validation_docs, or test_docs.
482
+ :param results:
483
+ The results of the requests created in construct_requests.
484
+ """
485
+ pass
486
+
487
+ @abc.abstractmethod
488
+ def aggregation(self):
489
+ """
490
+ :returns: {str: [metric_score] -> float}
491
+ A dictionary where keys are the names of submetrics and values are
492
+ functions that aggregate a list of metric scores
493
+ """
494
+ pass
495
+
496
+ @abc.abstractmethod
497
+ def higher_is_better(self):
498
+ """
499
+ :returns: {str: bool}
500
+ A dictionary where keys are the names of submetrics and values are
501
+ whether a higher value of the submetric is better
502
+ """
503
+ pass
504
+
505
+ def get_config(self, key: str) -> Any:
506
+ return getattr(self._config, key, None)
507
+
508
+ @classmethod
509
+ def count_bytes(cls, doc):
510
+ """Used for byte-level perplexity metrics in rolling loglikelihood"""
511
+ return len(doc.encode("utf-8"))
512
+
513
+ @classmethod
514
+ def count_words(cls, doc):
515
+ """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
516
+ return len(re.split(r"\s+", doc))
517
+
518
+ @utils.positional_deprecated
519
+ def fewshot_context(
520
+ self,
521
+ doc,
522
+ num_fewshot,
523
+ rnd=random.Random(1234),
524
+ description=None,
525
+ ):
526
+ """Returns a fewshot context string that is made up of a prepended description
527
+ (if provided), the `num_fewshot` number of examples, and an appended prompt example.
528
+
529
+ :param doc: str
530
+ The document as returned from training_docs, validation_docs, or test_docs.
531
+ :param num_fewshot: int
532
+ The number of fewshot examples to provide in the returned context string.
533
+ :param rnd: random.Random
534
+ The pseudo-random number generator used to randomly sample examples.
535
+ WARNING: passing `rnd=None` raises a ValueError; a default generator seeded with 1234 is used if unspecified.
536
+ :param description: str
537
+ The task's description that will be prepended to the fewshot examples.
538
+ :returns: str
539
+ The fewshot context.
540
+ """
541
+ if rnd is None:
542
+ raise ValueError(
543
+ "A `random.Random` generator argument must be provided to `rnd`"
544
+ )
545
+
546
+ description = description if description else ""
547
+
548
+ if num_fewshot == 0:
549
+ labeled_examples = ""
550
+ else:
551
+ # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
552
+ if self.has_training_docs():
553
+ fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
554
+ else:
555
+ if self._fewshot_docs is None:
556
+ self._fewshot_docs = list(
557
+ self.validation_docs()
558
+ if self.has_validation_docs()
559
+ else self.test_docs()
560
+ )
561
+
562
+ fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)
563
+
564
+ # get rid of the doc that's the one we're evaluating, if it's in the fewshot
565
+ fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
566
+
567
+ labeled_examples = (
568
+ "\n\n".join(
569
+ [
570
+ self.doc_to_text(doc) + self.doc_to_target(doc)
571
+ for doc in fewshotex
572
+ ]
573
+ )
574
+ + "\n\n"
575
+ )
576
+
577
+ example = self.doc_to_text(doc)
578
+ return description + labeled_examples + example
579
+
580
+ def apply_filters(self) -> Optional[List[Instance]]:
581
+ """Iterates over FilterEnsembles and applies them to instances"""
582
+ if hasattr(self, "_filters"):
583
+ for f in self._filters:
584
+ f.apply(self._instances)
585
+ else:
586
+ eval_logger.warning("No filter defined, passing through instances")
587
+ return self._instances
588
+
589
+ def dump_config(self) -> dict:
590
+ """Returns the config as a dictionary."""
591
+ # TODO: this should only return the overrides applied to a non-YAML task's configuration.
592
+ # (num_fewshot)
593
+ return self.config.to_dict()
594
+
595
+ def set_config(self, key: str, value: Any, update: bool = False) -> None:
596
+ """Set or update the configuration for a given key."""
597
+ if key is None:
598
+ raise ValueError("Key must be provided.")
599
+
600
+ if update:
601
+ current_value = getattr(self._config, key, {})
602
+ if not isinstance(current_value, dict):
603
+ raise TypeError(
604
+ f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
605
+ )
606
+ current_value.update(value)
607
+ else:
608
+ setattr(self._config, key, value)
609
+
610
+ def override_metric(self, metric_name: str) -> None:
611
+ """
612
+ Override the default metrics used for evaluation with custom metrics.
613
+
614
+ Parameters:
615
+ - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
616
+ """
617
+ (
618
+ self._metric_fn_list,
619
+ self._aggregation_list,
620
+ self._metric_fn_kwargs,
621
+ self._higher_is_better,
622
+ ) = ({}, {}, {}, {})
623
+ self._metric_fn_list[metric_name] = get_metric(metric_name)
624
+ self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
625
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
626
+ self._metric_fn_kwargs[metric_name] = {}
627
+ if not isinstance(self, ConfigurableTask):
628
+ self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
629
+ self.aggregation = lambda: {
630
+ metric_name: get_metric_aggregation(metric_name)
631
+ }
632
+ setattr(self._config, "metric_list", [{"metric": metric_name}])
633
+ setattr(self._config, "process_results", None)
634
+
635
+ @property
636
+ def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
637
+ if self.has_test_docs():
638
+ return self.test_docs()
639
+ elif self.has_validation_docs():
640
+ return self.validation_docs()
641
+ else:
642
+ raise ValueError(
643
+ f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
644
+ )
645
+
646
+ def doc_iterator(
647
+ self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
648
+ ) -> Iterator[Tuple[int, Any]]:
649
+ limit = int(limit) if limit else None
650
+ doc_iterator = utils.create_iterator(
651
+ enumerate(self.eval_docs),
652
+ rank=int(rank),
653
+ limit=limit,
654
+ world_size=int(world_size),
655
+ )
656
+ return doc_iterator
657
+
658
+
659
+ class ConfigurableTask(Task):
660
+ VERSION = "Yaml"
661
+ OUTPUT_TYPE = None
662
+ CONFIG = None
663
+
664
+ def __init__(
665
+ self,
666
+ data_dir=None,
667
+ cache_dir=None,
668
+ download_mode=None,
669
+ config: Optional[dict] = None,
670
+ ) -> None: # TODO no super() call here
671
+ # Get pre-configured attributes
672
+ self._config = self.CONFIG
673
+
674
+ # Use new configurations if there was no preconfiguration
675
+ if self.config is None:
676
+ self._config = TaskConfig(**config)
677
+ # Overwrite configs
678
+ else:
679
+ if config is not None:
680
+ self._config.__dict__.update(config)
681
+
682
+ if self.config is None:
683
+ raise ValueError(
684
+ "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
685
+ )
686
+
687
+ if isinstance(self.config.metadata, dict):
688
+ if "version" in self.config.metadata:
689
+ self.VERSION = self.config.metadata["version"]
690
+
691
+ if self.config.output_type is not None:
692
+ if self.config.output_type not in ALL_OUTPUT_TYPES:
693
+ raise ValueError(
694
+ f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
695
+ )
696
+ self.OUTPUT_TYPE = self.config.output_type
697
+
698
+ if self.config.dataset_path is not None:
699
+ self.DATASET_PATH = self.config.dataset_path
700
+
701
+ if self.config.dataset_name is not None:
702
+ self.DATASET_NAME = self.config.dataset_name
703
+
704
+ self._metric_fn_list = {}
705
+ self._metric_fn_kwargs = {}
706
+ self._aggregation_list = {}
707
+ self._higher_is_better = {}
708
+
709
+ if self.config.metric_list is None:
710
+ # TODO: handle this in TaskConfig.__post_init__ ?
711
+ _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
712
+
713
+ for metric_name in _metric_list:
714
+ self._metric_fn_list[metric_name] = get_metric(metric_name)
715
+ self._metric_fn_kwargs[metric_name] = {}
716
+ self._aggregation_list[metric_name] = get_metric_aggregation(
717
+ metric_name
718
+ )
719
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
720
+ else:
721
+ for metric_config in self.config.metric_list:
722
+ if "metric" not in metric_config:
723
+ raise ValueError(
724
+ "'metric' key not provided for an entry in 'metric_list', must be specified!"
725
+ )
726
+ metric_name = metric_config["metric"]
727
+ kwargs = {
728
+ key: metric_config[key]
729
+ for key in metric_config
730
+ if key
731
+ not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
732
+ }
733
+ hf_evaluate_metric = (
734
+ "hf_evaluate" in metric_config
735
+ and metric_config["hf_evaluate"] is True
736
+ )
737
+
738
+ if self.config.process_results is not None:
739
+ self._metric_fn_list[metric_name] = None
740
+ self._metric_fn_kwargs[metric_name] = {}
741
+ elif callable(metric_name):
742
+ metric_fn = metric_name.__call__
743
+ metric_name = metric_name.__name__
744
+ self._metric_fn_list[metric_name] = metric_fn
745
+ self._metric_fn_kwargs[metric_name] = kwargs
746
+ else:
747
+ self._metric_fn_list[metric_name] = get_metric(
748
+ metric_name, hf_evaluate_metric
749
+ )
750
+ self._metric_fn_kwargs[metric_name] = kwargs
751
+
752
+ if "aggregation" in metric_config:
753
+ agg_name = metric_config["aggregation"]
754
+ if isinstance(agg_name, str):
755
+ self._aggregation_list[metric_name] = get_aggregation(agg_name)
756
+ elif callable(agg_name): # noqa: E721
757
+ self._aggregation_list[metric_name] = metric_config[
758
+ "aggregation"
759
+ ]
760
+ else:
761
+ INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
762
+ metric_agg = get_metric_aggregation(metric_name)
763
+ eval_logger.warning(
764
+ f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
765
+ f"using default "
766
+ f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
767
+ )
768
+ self._aggregation_list[metric_name] = metric_agg
769
+
770
+ if "higher_is_better" in metric_config:
771
+ self._higher_is_better[metric_name] = metric_config[
772
+ "higher_is_better"
773
+ ]
774
+ else:
775
+ eval_logger.warning(
776
+ f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
777
+ f"using default "
778
+ f"higher_is_better={is_higher_better(metric_name)}"
779
+ )
780
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
781
+
782
+ self.download(self.config.dataset_kwargs)
783
+ self._training_docs = None
784
+ self._fewshot_docs = None
785
+
786
+ if self.config.filter_list is not None:
787
+ self._filters = []
788
+ for filter_config in self.config.filter_list:
789
+ filter_name = filter_config["name"]
790
+ filter_functions = filter_config["filter"]
791
+ components = []
792
+ for function in filter_functions:
793
+ kwargs = {
794
+ key: function[key] for key in function if key != "function"
795
+ }
796
+ components.append([function["function"], kwargs])
797
+ filter_pipeline = build_filter_ensemble(filter_name, components)
798
+ self._filters.append(filter_pipeline)
799
+ else:
800
+ self._filters = [build_filter_ensemble("none", [["take_first", None]])]
801
+
802
+ if self.config.use_prompt is not None:
803
+ eval_logger.info(f"loading prompt {self.config.use_prompt}")
804
+ self.prompt = get_prompt(
805
+ self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
806
+ )
807
+ else:
808
+ self.prompt = None
809
+
810
+ if self.fewshot_docs() is not None:
811
+ self.sampler = samplers.get_sampler(
812
+ self.config.fewshot_config.get("sampler", "default")
813
+ if self.config.fewshot_config
814
+ else "default"
815
+ )(list(self.fewshot_docs()), self, rnd=random.Random(1234))
816
+
817
+ self.task_docs = self.eval_docs
818
+
819
+ # Test One Doc
820
+ self.features = list(self.task_docs.features.keys())
821
+ self.multiple_input = 0
822
+ self.multiple_target = 0
823
+ test_doc = self.task_docs[0]
824
+ test_text = self.doc_to_text(test_doc)
825
+ test_target = self.doc_to_target(test_doc)
826
+
827
+ if self.config.doc_to_choice is not None:
828
+ test_choice = self.doc_to_choice(test_doc)
829
+ if not isinstance(test_choice, list):
830
+ eval_logger.error("doc_to_choice must return list")
831
+ else:
832
+ num_choice = len(test_choice)
833
+
834
+ if isinstance(test_text, int):
835
+ self.multiple_input = num_choice
836
+ else:
837
+ test_choice = None
838
+
839
+ if isinstance(test_target, list):
840
+ self.multiple_target = len(test_target)
841
+ else:
842
+ if (isinstance(test_target, int)) and (test_choice is not None):
843
+ test_target = test_choice[test_target]
844
+ else:
845
+ test_target = str(test_target)
846
+
847
+ if test_choice is not None:
848
+ check_choices = test_choice
849
+ else:
850
+ check_choices = [test_target]
851
+ if self.config.doc_to_choice is not None:
852
+ for choice in check_choices:
853
+ choice_has_whitespace = True if choice[0].isspace() else False
854
+ delimiter_has_whitespace = (
855
+ True
856
+ if self.config.target_delimiter.rstrip()
857
+ != self.config.target_delimiter
858
+ else False
859
+ )
860
+
861
+ if delimiter_has_whitespace and choice_has_whitespace:
862
+ eval_logger.debug(
863
+ f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
864
+ )
865
+ elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
866
+ eval_logger.debug(
867
+ f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
868
+ )
869
+
870
+ def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
871
+ self.dataset = datasets.load_dataset(
872
+ path=self.DATASET_PATH,
873
+ name=self.DATASET_NAME,
874
+ **dataset_kwargs if dataset_kwargs is not None else {},
875
+ )
876
+
877
+ def has_training_docs(self) -> bool:
878
+ if self.config.training_split is not None:
879
+ return True
880
+ else:
881
+ return False
882
+
883
+ def has_validation_docs(self) -> bool:
884
+ if self.config.validation_split is not None:
885
+ return True
886
+ else:
887
+ return False
888
+
889
+ def has_test_docs(self) -> bool:
890
+ if self.config.test_split is not None:
891
+ return True
892
+ else:
893
+ return False
894
+
895
+ def training_docs(self) -> datasets.Dataset:
896
+ if self.has_training_docs():
897
+ if self.config.process_docs is not None:
898
+ return self.config.process_docs(
899
+ self.dataset[self.config.training_split]
900
+ )
901
+ return self.dataset[self.config.training_split]
902
+
903
+ def validation_docs(self) -> datasets.Dataset:
904
+ if self.has_validation_docs():
905
+ if self.config.process_docs is not None:
906
+ return self.config.process_docs(
907
+ self.dataset[self.config.validation_split]
908
+ )
909
+ return self.dataset[self.config.validation_split]
910
+
911
+ def test_docs(self) -> datasets.Dataset:
912
+ if self.has_test_docs():
913
+ if self.config.process_docs is not None:
914
+ return self.config.process_docs(self.dataset[self.config.test_split])
915
+ return self.dataset[self.config.test_split]
916
+
917
+ def fewshot_docs(self):
918
+ if self.config.fewshot_split is not None:
919
+ if self.config.process_docs is not None:
920
+ return self.config.process_docs(self.dataset[self.config.fewshot_split])
921
+ return self.dataset[self.config.fewshot_split]
922
+ else:
923
+ if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
924
+ eval_logger.warning(
925
+ f"Task '{self.config.task}': "
926
+ "num_fewshot > 0 but fewshot_split is None. "
927
+ "using preconfigured rule."
928
+ )
929
+ return super().fewshot_docs()
930
+
931
+ @utils.positional_deprecated
932
+ def fewshot_context(self, doc: str, num_fewshot: int) -> str:
933
+ """Returns a fewshot context string that is made up of a prepended description
934
+ (if provided), the `num_fewshot` number of examples, and an appended prompt example.
935
+
936
+ :param doc: str
937
+ The document as returned from training_docs, validation_docs, or test_docs.
938
+ :param num_fewshot: int
939
+ The number of fewshot examples to provide in the returned context string.
940
+ :returns: str
941
+ The fewshot context.
942
+ """
943
+ if description := self.config.description:
944
+ description = utils.apply_template(self.config.description, doc)
945
+
946
+ if num_fewshot == 0:
947
+ # always prepend the (possibly empty) task description
948
+ labeled_examples = description
949
+ else:
950
+ labeled_examples = description + self.sampler.get_context(doc, num_fewshot)
951
+
952
+ example = self.doc_to_text(doc)
953
+ if self.multiple_input:
954
+ return labeled_examples
955
+ else:
956
+ if isinstance(example, str):
957
+ return labeled_examples + example
958
+ elif isinstance(example, list):
959
+ return [labeled_examples + ex for ex in example]
960
+ elif isinstance(example, int):
961
+ if self.config.doc_to_choice is not None:
962
+ choices = self.doc_to_choice(doc)
963
+ return labeled_examples + choices[example]
964
+ else:
965
+ return labeled_examples + str(example)
966
+
967
+ def apply_filters(self):
968
+ """Iterates over FilterEnsembles and applies them to instances"""
969
+ if hasattr(self, "_filters"):
970
+ for f in self._filters:
971
+ f.apply(self._instances)
972
+ else:
973
+ eval_logger.warning("No filter defined, passing through instances")
974
+ return self._instances
975
+
976
+ def should_decontaminate(self):
977
+ return self.config.should_decontaminate
978
+
979
+ def doc_to_decontamination_query(self, doc):
980
+ if self.config.should_decontaminate:
981
+ if self.config.doc_to_decontamination_query is None:
982
+ return self.doc_to_text(doc)
983
+ else:
984
+ doc_to_decontamination_query = self.config.doc_to_decontamination_query
985
+ if doc_to_decontamination_query in self.features:
986
+ return doc[doc_to_decontamination_query]
987
+ elif callable(doc_to_decontamination_query):
988
+ return doc_to_decontamination_query(doc)
989
+ else:
990
+ return ast.literal_eval(
991
+ utils.apply_template(
992
+ self.config.doc_to_decontamination_query, doc
993
+ )
994
+ )
995
+
996
+ def _process_doc(self, doc: dict) -> dict:
997
+ """
998
+ Override this to process (detokenize, strip, replace, etc.) individual
999
+ documents. This can be used in a map over documents of a data split.
1000
+ E.g. `map(self._process_doc, self.dataset["validation"])`
1001
+
1002
+ :return: dict
1003
+ The processed version of the specified `doc`.
1004
+ """
1005
+ return doc
1006
+
1007
+ def doc_to_text(self, doc):
1008
+ if self.prompt is not None:
1009
+ doc_to_text = self.prompt
1010
+ else:
1011
+ doc_to_text = self.config.doc_to_text
1012
+
1013
+ if isinstance(doc_to_text, int):
1014
+ return doc_to_text
1015
+ elif isinstance(doc_to_text, str):
1016
+ if doc_to_text in self.features:
1017
+ # if self.config.doc_to_choice is not None:
1018
+ # return self.doc_to_choice(doc)[doc[doc_to_text]]
1019
+ # else:
1020
+ return doc[doc_to_text]
1021
+ else:
1022
+ text_string = utils.apply_template(doc_to_text, doc)
1023
+ if text_string.isdigit() and self._config.doc_to_choice is not None:
1024
+ return ast.literal_eval(text_string)
1025
+ else:
1026
+ return text_string
1027
+ elif callable(doc_to_text):
1028
+ return doc_to_text(doc)
1029
+ # Used when applying a Promptsource template
1030
+ elif hasattr(doc_to_text, "apply"):
1031
+ applied_prompt = doc_to_text.apply(doc)
1032
+ if len(applied_prompt) == 2:
1033
+ return applied_prompt[0]
1034
+ else:
1035
+ eval_logger.warning("Applied prompt returns empty string")
1036
+ return self.config.fewshot_delimiter
1037
+ else:
1038
+ raise TypeError(
1039
+ f"Unexpected doc_to_text type: {type(doc_to_text)}")
1040
+
1041
+ def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
1042
+ if self.prompt is not None:
1043
+ doc_to_target = self.prompt
1044
+ else:
1045
+ doc_to_target = self.config.doc_to_target
1046
+
1047
+ if isinstance(doc_to_target, int):
1048
+ return doc_to_target
1049
+ elif isinstance(doc_to_target, str):
1050
+ if doc_to_target in self.features:
1051
+ # if self.config.doc_to_choice is not None:
1052
+ # return self.doc_to_choice(doc)[doc[doc_to_target]]
1053
+ # else:
1054
+ return doc[doc_to_target]
1055
+ else:
1056
+ target_string = utils.apply_template(doc_to_target, doc)
1057
+ if target_string.isdigit() and self._config.doc_to_choice is not None:
1058
+ return ast.literal_eval(target_string)
1059
+ elif (
1060
+ len(target_string) >= 2
1061
+ and (target_string[0] == "[")
1062
+ and (target_string[-1] == "]")
1063
+ ):
1064
+ try:
1065
+ return ast.literal_eval(target_string)
1066
+ except (SyntaxError, ValueError):
1067
+ return target_string
1068
+ else:
1069
+ return target_string
1070
+ elif isinstance(doc_to_target, list):
1071
+ return doc_to_target
1072
+ elif callable(doc_to_target):
1073
+ return doc_to_target(doc)
1074
+ # Used when applying a Promptsource template
1075
+ elif hasattr(doc_to_target, "apply"):
1076
+ applied_prompt = doc_to_target.apply(doc)
1077
+ if len(applied_prompt) == 2:
1078
+ return applied_prompt[1]
1079
+ else:
1080
+ eval_logger.warning("Applied prompt returns empty string")
1081
+ return self.config.fewshot_delimiter
1082
+ else:
1083
+ raise TypeError
1084
+
1085
+ def doc_to_choice(self, doc: Any) -> List[str]:
1086
+ if self.prompt is not None:
1087
+ doc_to_choice = self.prompt
1088
+ elif self.config.doc_to_choice is None:
1089
+ eval_logger.error("doc_to_choice was called but not set in config")
1090
+ else:
1091
+ doc_to_choice = self.config.doc_to_choice
1092
+
1093
+ if isinstance(doc_to_choice, str):
1094
+ if doc_to_choice in self.features:
1095
+ return doc[doc_to_choice]
1096
+ else:
1097
+ return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
1098
+ elif isinstance(doc_to_choice, list):
1099
+ return doc_to_choice
1100
+ elif isinstance(doc_to_choice, dict):
1101
+ return list(doc_to_choice.values())
1102
+ elif callable(doc_to_choice):
1103
+ return doc_to_choice(doc)
1104
+ elif hasattr(doc_to_choice, "get_answer_choices_list"):
1105
+ return doc_to_choice.get_answer_choices_list(doc)
1106
+ else:
1107
+ raise TypeError
1108
+
1109
+ def construct_requests(
1110
+ self, doc: dict, ctx: str, **kwargs
1111
+ ) -> Union[List[Instance], Instance]:
1112
+ if self.OUTPUT_TYPE == "loglikelihood":
1113
+ arguments = (ctx, self.doc_to_target(doc))
1114
+ elif self.OUTPUT_TYPE == "loglikelihood_rolling":
1115
+ arguments = (self.doc_to_target(doc),)
1116
+ elif self.OUTPUT_TYPE == "multiple_choice":
1117
+ choices = self.doc_to_choice(doc)
1118
+ target_delimiter = self.config.target_delimiter
1119
+ if self.multiple_input:
1120
+ # If there are multiple inputs, choices are placed in the ctx
1121
+ cont = self.doc_to_target(doc)
1122
+ arguments = [
1123
+ (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
1124
+ ]
1125
+ else:
1126
+ # Otherwise they are placed in the continuation
1127
+ arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
1128
+
1129
+ request_list = [
1130
+ Instance(
1131
+ request_type="loglikelihood",
1132
+ doc=doc,
1133
+ arguments=arg,
1134
+ idx=i,
1135
+ **kwargs,
1136
+ )
1137
+ for i, arg in enumerate(arguments)
1138
+ ]
1139
+ # TODO: we should raise a warning telling users this will at most ~2x runtime.
1140
+ if "acc_mutual_info" in self._metric_fn_list.keys():
1141
+ # if we are calculating multiple choice accuracy
1142
+ # using mutual information instead of raw loglikelihood as metric, need unconditional lls.
1143
+
1144
+ # here mutual info refers to calculating
1145
+ # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
1146
+ # in other words normalizing by subtracting the unconditional logprob of each choice.
1147
+ request_list.extend(
1148
+ [
1149
+ Instance(
1150
+ request_type="loglikelihood",
1151
+ doc=doc,
1152
+ arguments=("", "{}".format(choice)),
1153
+ idx=i,
1154
+ **kwargs,
1155
+ )
1156
+ for i, choice in enumerate(choices)
1157
+ ]
1158
+ )
1159
+ return request_list
1160
+
1161
+ elif self.OUTPUT_TYPE == "generate_until":
1162
+ arguments = (ctx, deepcopy(self.config.generation_kwargs))
1163
+
1164
+ return Instance(
1165
+ request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
1166
+ )
1167
+
1168
+ def process_results(self, doc, results):
1169
+ if callable(self.config.process_results):
1170
+ return self.config.process_results(doc, results)
1171
+
1172
+ result_dict = {}
1173
+ use_metric = list(self._metric_fn_list.keys())
1174
+ if self.OUTPUT_TYPE == "loglikelihood":
1175
+ results = results[0]
1176
+ ll, is_greedy = results
1177
+ return {
1178
+ **({"perplexity": ll} if "perplexity" in use_metric else {}),
1179
+ **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
1180
+ }
1181
+ elif self.OUTPUT_TYPE == "loglikelihood_rolling":
1182
+ (loglikelihood,) = results
1183
+ _words = self.count_words(self.doc_to_target(doc))
1184
+ _bytes = self.count_bytes(self.doc_to_target(doc))
1185
+ return {
1186
+ **(
1187
+ {"word_perplexity": (loglikelihood, _words)}
1188
+ if "word_perplexity" in use_metric
1189
+ else {}
1190
+ ),
1191
+ **(
1192
+ {"byte_perplexity": (loglikelihood, _bytes)}
1193
+ if "byte_perplexity" in use_metric
1194
+ else {}
1195
+ ),
1196
+ **(
1197
+ {"bits_per_byte": (loglikelihood, _bytes)}
1198
+ if "bits_per_byte" in use_metric
1199
+ else {}
1200
+ ),
1201
+ }
1202
+ elif self.OUTPUT_TYPE == "multiple_choice":
1203
+ lls, is_greedy = zip(*results)
1204
+
1205
+ # retrieve choices in List[str] form, to compute choice lengths, etc.
1206
+ choices = self.doc_to_choice(doc)
1207
+ completion_len = np.array([float(len(i)) for i in choices])
1208
+
1209
+ if (
1210
+ 2 * len(choices) == len(lls)
1211
+ and "acc_mutual_info" in self._metric_fn_list.keys()
1212
+ ):
1213
+ # then we are doing mutual info.
1214
+ # this stores the "dryrun" / unconditional answer loglikelihoods
1215
+ lls_unconditional = lls[1::2]
1216
+ if len(lls_unconditional) != len(choices):
1217
+ raise ValueError
1218
+ # and this stores our "regular" conditional loglikelihoods
1219
+ lls = lls[::2]
1220
+
1221
+ pred = np.argmax(lls)
1222
+ pred_norm = np.argmax(lls / completion_len)
1223
+
1224
+ if self.multiple_input:
1225
+ gold = self.doc_to_text(doc)
1226
+ else:
1227
+ gold = self.doc_to_target(doc)
1228
+
1229
+ gold_index_error = False
1230
+ if isinstance(gold, list):
1231
+ gold = [i if i < len(choices) else -100 for i in gold]
1232
+ if -100 in gold:
1233
+ gold_index_error = True
1234
+ else:
1235
+ if isinstance(gold, int):
1236
+ gold = gold if gold < len(choices) else -100
1237
+ elif isinstance(gold, str):
1238
+ gold = choices.index(gold) if gold in choices else -100
1239
+
1240
+ if gold == -100:
1241
+ gold_index_error = True
1242
+
1243
+ if gold_index_error:
1244
+ eval_logger.warning(
1245
+ f"Label index was not in within range of available choices,"
1246
+ f"Sample:\n\n{doc}\n\n"
1247
+ )
1248
+
1249
+ if self.multiple_target:
1250
+ acc = 1.0 if pred in gold else 0.0
1251
+ acc_norm = 1.0 if pred_norm in gold else 0.0
1252
+ exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
1253
+ else:
1254
+ acc = 1.0 if pred == gold else 0.0
1255
+ acc_norm = 1.0 if pred_norm == gold else 0.0
1256
+ # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
1257
+ exact_match = int(is_greedy[gold]) if gold != -100 else 0
1258
+
1259
+ prob_norm = utils.softmax(lls)
1260
+
1261
+ # TODO use keyword arguments to the metric?
1262
+ # gold, pred, norm stuff, the original lls,
1263
+ result_dict = {
1264
+ **({"acc": acc} if "acc" in use_metric else {}),
1265
+ **({"f1": (gold, pred)} if "f1" in use_metric else {}),
1266
+ **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
1267
+ **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
1268
+ **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
1269
+ **(
1270
+ {"brier_score": (gold, prob_norm)}
1271
+ if "brier_score" in use_metric
1272
+ else {}
1273
+ ),
1274
+ }
1275
+
1276
+ if "acc_mutual_info" in use_metric:
1277
+ lls_mutual_info = [
1278
+ ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
1279
+ ]
1280
+ acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
1281
+ result_dict["acc_mutual_info"] = acc_mutual_info
1282
+
1283
+ elif self.OUTPUT_TYPE == "generate_until":
1284
+ gold = self.doc_to_target(doc)
1285
+ result = results[0]
1286
+ if self.config.doc_to_choice is not None:
1287
+ # If you set doc_to_choice,
1288
+ # it assumes that doc_to_target returns a number.
1289
+ choices = self.doc_to_choice(doc)
1290
+ gold = choices[gold]
1291
+ # we expect multiple_targets to be a list.
1292
+ elif self.multiple_target:
1293
+ gold = list(gold)
1294
+ elif type(gold) != type(result):
1295
+ # cast gold to the same type as result
1296
+ gold = type(result)(gold)
1297
+
1298
+ for metric in self._metric_fn_list.keys():
1299
+ if self.multiple_target:
1300
+ # in the case where we have multiple targets,
1301
+ # return true if any are true
1302
+ # TODO: this may break for multiple_target, non zero-or-1 metrics
1303
+ scores = []
1304
+ if not isinstance(gold, list):
1305
+ # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
1306
+ # print(gold)
1307
+ gold = [gold]
1308
+ if metric == "exact_match":
1309
+ result = [result for _ in range(len(gold))]
1310
+ scores = self._metric_fn_list[metric](
1311
+ references=gold,
1312
+ predictions=result,
1313
+ **self._metric_fn_kwargs[metric],
1314
+ )[metric]
1315
+ result_score = 1.0 if scores > 0.0 else 0.0
1316
+ else:
1317
+ for gold_option in gold:
1318
+ try:
1319
+ result_score = self._metric_fn_list[metric](
1320
+ references=[gold_option],
1321
+ predictions=[result],
1322
+ **self._metric_fn_kwargs[metric],
1323
+ )
1324
+ except (
1325
+ TypeError
1326
+ ): # TODO: this is hacky and I don't want to do it
1327
+ result_score = self._metric_fn_list[metric](
1328
+ [gold_option, result]
1329
+ )
1330
+ if isinstance(result_score, dict):
1331
+ # TODO: this handles the case where HF evaluate returns a dict.
1332
+ result_score = result_score[metric]
1333
+ scores.append(result_score)
1334
+ if any(scores):
1335
+ result_score = 1.0
1336
+ else:
1337
+ result_score = 0.0
1338
+ else:
1339
+ try:
1340
+ result_score = self._metric_fn_list[metric](
1341
+ references=[gold],
1342
+ predictions=[result],
1343
+ **self._metric_fn_kwargs[metric],
1344
+ )
1345
+ except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
1346
+ result_score = self._metric_fn_list[metric]([gold, result])
1347
+ if isinstance(result_score, dict):
1348
+ # TODO: this handles the case where HF evaluate returns a dict.
1349
+ result_score = result_score[metric]
1350
+ result_dict[metric] = result_score
1351
+ else:
1352
+ raise ValueError(
1353
+ f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ",
1354
+ "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'",
1355
+ )
1356
+
1357
+ return result_dict
1358
+
1359
+ def aggregation(self) -> dict:
1360
+ return self._aggregation_list
1361
+
1362
+ def higher_is_better(self) -> dict:
1363
+ return self._higher_is_better
1364
+
1365
+ def get_config(self, key: str) -> Any:
1366
+ return getattr(self._config, key, None)
1367
+
1368
+ def __repr__(self):
1369
+ return (
1370
+ f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
1371
+ f"group_name={getattr(self.config, 'group', None)},"
1372
+ f"output_type={self.OUTPUT_TYPE},"
1373
+ f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
1374
+ f"num_samples={len(self.eval_docs)})"
1375
+ )
1376
+
1377
+
1378
+ class MultipleChoiceTask(Task):
1379
+ OUTPUT_TYPE = "loglikelihood"
1380
+
1381
+ def doc_to_target(self, doc: dict) -> str:
1382
+ return " " + doc["choices"][doc["gold"]]
1383
+
1384
+ def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
1385
+ # TODO: add mutual info here?
1386
+ return [
1387
+ Instance(
1388
+ request_type="loglikelihood",
1389
+ doc=doc,
1390
+ arguments=(ctx, " {}".format(choice)),
1391
+ idx=i,
1392
+ **kwargs,
1393
+ )
1394
+ for i, choice in enumerate(doc["choices"])
1395
+ ]
1396
+
1397
+ def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
1398
+ results = [
1399
+ res[0] for res in results
1400
+ ] # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
1401
+ gold = doc["gold"]
1402
+
1403
+ acc = 1.0 if np.argmax(results) == gold else 0.0
1404
+ completion_len = np.array([float(len(i)) for i in doc["choices"]])
1405
+ acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0
1406
+
1407
+ return {
1408
+ "acc": acc,
1409
+ "acc_norm": acc_norm,
1410
+ }
1411
+
1412
+ def higher_is_better(self) -> dict:
1413
+ return {
1414
+ "acc": True,
1415
+ "acc_norm": True,
1416
+ }
1417
+
1418
+ def aggregation(self) -> dict:
1419
+ return {
1420
+ "acc": mean,
1421
+ "acc_norm": mean,
1422
+ }
1423
+
1424
+
1425
+ class PerplexityTask(Task):
1426
+ OUTPUT_TYPE = "loglikelihood_rolling"
1427
+
1428
+ def has_training_docs(self) -> bool:
1429
+ return False
1430
+
1431
+ def fewshot_examples(self, k: int, rnd) -> List:
1432
+ if k != 0:
1433
+ raise ValueError(
1434
+ "The number of fewshot examples must be 0 for perplexity tasks."
1435
+ )
1436
+ return []
1437
+
1438
+ def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
1439
+ if num_fewshot != 0:
1440
+ raise ValueError(
1441
+ "The number of fewshot examples must be 0 for perplexity tasks."
1442
+ )
1443
+
1444
+ return ""
1445
+
1446
+ def higher_is_better(self) -> dict:
1447
+ return {
1448
+ "word_perplexity": False,
1449
+ "byte_perplexity": False,
1450
+ "bits_per_byte": False,
1451
+ }
1452
+
1453
+ def doc_to_decontamination_query(self, doc):
1454
+ return doc
1455
+
1456
+ def doc_to_text(self, doc) -> str:
1457
+ return ""
1458
+
1459
+ def doc_to_target(self, doc):
1460
+ return doc
1461
+
1462
+ def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
1463
+ if bool(ctx):
1464
+ raise ValueError
1465
+
1466
+ return Instance(
1467
+ request_type=self.OUTPUT_TYPE,
1468
+ doc=doc,
1469
+ arguments=(self.doc_to_target(doc),),
1470
+ idx=0,
1471
+ **kwargs,
1472
+ )
1473
+
1474
+ def process_results(self, doc: dict, results: Tuple[float]) -> dict:
1475
+ (loglikelihood,) = results
1476
+ words = self.count_words(self.doc_to_target(doc))
1477
+ bytes_ = self.count_bytes(self.doc_to_target(doc))
1478
+ return {
1479
+ "word_perplexity": (loglikelihood, words),
1480
+ "byte_perplexity": (loglikelihood, bytes_),
1481
+ "bits_per_byte": (loglikelihood, bytes_),
1482
+ }
1483
+
1484
+ def aggregation(self) -> dict:
1485
+ return {
1486
+ "word_perplexity": weighted_perplexity,
1487
+ "byte_perplexity": weighted_perplexity,
1488
+ "bits_per_byte": bits_per_byte,
1489
+ }
1490
+
1491
+ @classmethod
1492
+ def count_bytes(cls, doc) -> int:
1493
+ return len(doc.encode("utf-8"))
1494
+
1495
+ @classmethod
1496
+ def count_words(cls, doc) -> int:
1497
+ """Downstream tasks with custom word boundaries should override this!"""
1498
+ return len(re.split(r"\s+", doc))
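A minimal usage sketch (not part of the diff above) of how TaskConfig.__post_init__ fills in generation defaults for `generate_until` tasks. It assumes `lm_eval` is importable and that the `task` field shown earlier in this file accepts a plain string; the task name is a placeholder.

from lm_eval.api.task import TaskConfig

# No generation_kwargs passed: __post_init__ supplies greedy defaults,
# stopping on the fewshot delimiter and disabling sampling.
cfg = TaskConfig(task="demo_task", output_type="generate_until")
print(cfg.generation_kwargs)   # {'until': ['\n\n'], 'do_sample': False}

# A string temperature is coerced to float and an `until` default is added.
cfg = TaskConfig(
    task="demo_task",
    output_type="generate_until",
    generation_kwargs={"temperature": "0.7"},
)
print(cfg.generation_kwargs)   # {'temperature': 0.7, 'until': ['\n\n']}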
lm-evaluation-harness/lm_eval/caching/__pycache__/cache.cpython-310.pyc ADDED
Binary file (1.6 kB).
 
lm-evaluation-harness/lm_eval/caching/cache.py ADDED
@@ -0,0 +1,55 @@
1
+ import hashlib
2
+ import os
3
+
4
+ import dill
5
+
6
+ from lm_eval.utils import eval_logger
7
+
8
+
9
+ MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
10
+
11
+ OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH")
12
+
13
+
14
+ PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache"
15
+
16
+ # This should be sufficient for uniqueness
17
+ HASH_INPUT = "EleutherAI-lm-evaluation-harness"
18
+
19
+ HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest()
20
+
21
+ FILE_SUFFIX = f".{HASH_PREFIX}.pickle"
22
+
23
+
24
+ def load_from_cache(file_name):
25
+ try:
26
+ path = f"{PATH}/{file_name}{FILE_SUFFIX}"
27
+
28
+ with open(path, "rb") as file:
29
+ cached_task_dict = dill.loads(file.read())
30
+ return cached_task_dict
31
+
32
+ except Exception:
33
+ eval_logger.debug(f"{file_name} is not cached, generating...")
34
+ pass
35
+
36
+
37
+ def save_to_cache(file_name, obj):
38
+ if not os.path.exists(PATH):
39
+ os.mkdir(PATH)
40
+
41
+ file_path = f"{PATH}/{file_name}{FILE_SUFFIX}"
42
+
43
+ eval_logger.debug(f"Saving {file_path} to cache...")
44
+ with open(file_path, "wb") as file:
45
+ file.write(dill.dumps(obj))
46
+
47
+
48
+ # NOTE the "key" param is to allow for flexibility
49
+ def delete_cache(key: str = ""):
50
+ files = os.listdir(PATH)
51
+
52
+ for file in files:
53
+ if file.startswith(key) and file.endswith(FILE_SUFFIX):
54
+ file_path = f"{PATH}/{file}"
55
+ os.unlink(file_path)
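A minimal sketch of the cache round-trip this module supports (illustrative only; the file name below is arbitrary, and the module appends its own hash-based suffix to it).

from lm_eval.caching.cache import delete_cache, load_from_cache, save_to_cache

save_to_cache(file_name="requests-demo", obj=[{"doc_id": 0, "ctx": "Q: ..."}])
cached = load_from_cache(file_name="requests-demo")  # the pickled object, or None if absent
print(cached)
delete_cache(key="requests-demo")  # removes matching .pickle files in the cache dir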
lm-evaluation-harness/lm_eval/filters/__init__.py ADDED
@@ -0,0 +1,48 @@
1
+ from functools import partial
2
+ from typing import List, Union
3
+
4
+ from lm_eval.api.filter import FilterEnsemble
5
+
6
+ from . import extraction, selection, transformation
7
+
8
+
9
+ FILTER_REGISTRY = {
10
+ "take_first": selection.TakeFirstFilter,
11
+ "regex": extraction.RegexFilter,
12
+ "majority_vote": selection.MajorityVoteFilter,
13
+ "take_first_k": selection.TakeKFilter,
14
+ "remove_whitespace": extraction.WhitespaceFilter,
15
+ "lowercase": transformation.LowercaseFilter,
16
+ "uppercase": transformation.UppercaseFilter,
17
+ "map": transformation.MapFilter,
18
+ "multi_choice_regex": extraction.MultiChoiceRegexFilter,
19
+ # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function
20
+ # that takes an input and returns a scalar and then should select the max reward,
21
+ # or should implement different filters for different ways of handling a reward model's inference.
22
+ # "arg_max": selection.ArgMaxFilter,
23
+ }
24
+
25
+
26
+ def get_filter(filter_name: str) -> Union[type, str]:
27
+ if filter_name in FILTER_REGISTRY:
28
+ return FILTER_REGISTRY[filter_name]
29
+ else:
30
+ return filter_name
31
+
32
+
33
+ def build_filter_ensemble(
34
+ filter_name: str, components: List[List[str]]
35
+ ) -> FilterEnsemble:
36
+ """
37
+ Create a filtering pipeline.
38
+ """
39
+ filters = []
40
+ for function, kwargs in components:
41
+ if kwargs is None:
42
+ kwargs = {}
43
+ # create a filter given its name in the registry
44
+ f = partial(get_filter(function), **kwargs)
45
+ # add the filter as a pipeline step
46
+ filters.append(f)
47
+
48
+ return FilterEnsemble(name=filter_name, filters=filters)
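A hedged illustration of the `components` format that build_filter_ensemble expects: the same `[function, kwargs]` pairs that ConfigurableTask assembles from a YAML `filter_list`. The pipeline name and regex below are placeholders, not harness defaults.

from lm_eval.filters import build_filter_ensemble

ensemble = build_filter_ensemble(
    "get-answer",  # placeholder pipeline name
    [
        ["regex", {"regex_pattern": r"#### (\-?[0-9\.\,]+)"}],  # extract the final number
        ["take_first", None],  # keep only the first response per document
    ],
)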
lm-evaluation-harness/lm_eval/filters/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB).
 
lm-evaluation-harness/lm_eval/filters/__pycache__/extraction.cpython-310.pyc ADDED
Binary file (5.89 kB).
 
lm-evaluation-harness/lm_eval/filters/__pycache__/selection.cpython-310.pyc ADDED
Binary file (2.76 kB).
 
lm-evaluation-harness/lm_eval/filters/__pycache__/transformation.cpython-310.pyc ADDED
Binary file (3.31 kB).
 
lm-evaluation-harness/lm_eval/filters/decontamination.py ADDED
@@ -0,0 +1,24 @@
1
+ from lm_eval.api.filter import Filter
2
+
3
+
4
+ class DecontaminationFilter(Filter):
5
+
6
+ """
7
+ A filter which tracks document contamination, splitting results into contaminated and uncontaminated subsets.
8
+ """
9
+
10
+ name = "track_decontamination"
11
+
12
+ def __init__(self, path) -> None:
13
+ """
14
+
15
+ TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path").
16
+ should further cache result on a given (task_name, doc_id)
17
+ """
18
+ self._decontam_results = None
19
+
20
+ def apply(self, resps, docs) -> None:
21
+ """
22
+ Return {"no_contamination", "only_contamination"} keys for the 2 different subsets
23
+ """
24
+ pass
lm-evaluation-harness/lm_eval/filters/extraction.py ADDED
@@ -0,0 +1,183 @@
1
+ import re
2
+ import sys
3
+ import unicodedata
4
+
5
+ from lm_eval.api.filter import Filter
6
+
7
+
8
+ class RegexFilter(Filter):
9
+ """ """
10
+
11
+ def __init__(
12
+ self,
13
+ regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
14
+ group_select=0,
15
+ fallback: str = "[invalid]",
16
+ ) -> None:
17
+ """
18
+ pass a string `regex` to run `re.compile(r"regex")` on.
19
+ `fallback` defines the output returned if no matches for the regex are located.
20
+ """
21
+ self.regex_pattern = regex_pattern
22
+ self.regex = re.compile(regex_pattern)
23
+ self.group_select = group_select
24
+ self.fallback = fallback
25
+
26
+ def apply(self, resps, docs):
27
+ # here, we assume we have a list, in which each element is
28
+ # a list of model responses for some particular input/target pair.
29
+ # so we process each of these (same input/target response sets)
30
+ # independently (and keep them a list.)
31
+ def filter_set(inst):
32
+ filtered = []
33
+ for resp in inst:
34
+ match = self.regex.findall(resp)
35
+ if match:
36
+ match = match[self.group_select]
37
+ if isinstance(match, tuple):
38
+ match = [m for m in match if m][0]
39
+ match = match.strip()
40
+ else:
41
+ match = self.fallback
42
+ filtered.append(match)
43
+ return filtered
44
+
45
+ # print(resps)
46
+ filtered_resps = list(map(lambda x: filter_set(x), resps))
47
+ # print(filtered_resps)
48
+
49
+ return filtered_resps
50
+
51
+
52
+ class WhitespaceFilter(Filter):
53
+ """ """
54
+
55
+ def __init__(self) -> None:
56
+ pass
57
+
58
+ def apply(self, resps, docs):
59
+ def filter_set(inst):
60
+ filtered_resp = []
61
+ for resp in inst:
62
+ if resp.startswith(" "):
63
+ resp = resp[1:]
64
+
65
+ filtered_resp.append(resp)
66
+
67
+ return filtered_resp
68
+
69
+ filtered_resps = [filter_set(resp) for resp in resps]
70
+
71
+ return filtered_resps
72
+
73
+
74
+ class MultiChoiceRegexFilter(RegexFilter):
75
+ """
76
+ A filter used to extract a model's answer on multiple choice questions with
77
+ letter answers. assumes each document has a "choices" field
78
+ containing the list of answer choices and that the answer label symbols
79
+ are of the form (A), (B), (C), ... or A, B, C.
80
+ """
81
+
82
+ def __init__(
83
+ self,
84
+ regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
85
+ group_select=0,
86
+ fallback: str = "[invalid]",
87
+ ignore_case=False,
88
+ ignore_punctuation=False,
89
+ regexes_to_ignore=None,
90
+ ) -> None:
91
+ """
92
+ regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
93
+ - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
94
+ - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
95
+ group_select: Selects the (group_select)th match from the findall result.
96
+ ignore_case: Ignores the case during step 1 matching
97
+ ignore_punctuation: Remove the punctuation during step 1 matching
98
+ regexes_to_ignore: Remove these regexes during step 1 matching
99
+ """
100
+ super().__init__(regex_pattern, group_select, fallback)
101
+ self.ignore_case = ignore_case
102
+ self.ignore_punctuation = ignore_punctuation
103
+ self.regexes_to_ignore = regexes_to_ignore
104
+
105
+ def apply(self, resps, docs):
106
+ # here, we assume we have a list, in which each element is
107
+ # a list of model responses for some particular input/target pair.
108
+ # so we process each of these (same input/target response sets)
109
+ # independently (and keep them a list.)
110
+
111
+ def find_match(regex, resp, convert_dict={}):
112
+ match = regex.findall(resp)
113
+ if match:
114
+ match = match[self.group_select]
115
+ if isinstance(match, tuple):
116
+ match = [m for m in match if m][0]
117
+ match = match.strip()
118
+ if match and match in convert_dict:
119
+ match = convert_dict[match]
120
+ return match
121
+
122
+ punct_tbl = dict.fromkeys(
123
+ i
124
+ for i in range(sys.maxunicode)
125
+ if unicodedata.category(chr(i)).startswith("P")
126
+ )
127
+
128
+ def filter_ignores(st):
129
+ if self.regexes_to_ignore is not None:
130
+ for s in self.regexes_to_ignore:
131
+ st = re.sub(s, "", st)
132
+
133
+ if self.ignore_case:
134
+ st = st.lower()
135
+
136
+ if self.ignore_punctuation:
137
+ # https://stackoverflow.com/a/266162
138
+ st = st.translate(punct_tbl)
139
+ return st
140
+
141
+ filtered_resps = []
142
+
143
+ for r, doc in zip(resps, docs):
144
+ fallback_regexes = []
145
+ choice_to_alpha = {}
146
+ next_alpha = "A"
147
+
148
+ without_paren_fallback_regexes = []
149
+ without_paren_to_target = {}
150
+
151
+ choices = doc["choices"]
152
+ for c in choices:
153
+ m = filter_ignores(c.strip())
154
+ fallback_regexes.append(f"{re.escape(m)}")
155
+ choice_to_alpha[m] = f"({next_alpha})"
156
+
157
+ without_paren_fallback_regexes.append(next_alpha)
158
+ without_paren_to_target[next_alpha] = f"({next_alpha})"
159
+
160
+ next_alpha = chr(ord(next_alpha) + 1)
161
+ fallback_regex = re.compile("|".join(fallback_regexes))
162
+ without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
163
+ without_paren_fallback_regex = re.compile(
164
+ f":[\s]*({without_paren_fallback_regex})"
165
+ )
166
+
167
+ filtered = []
168
+ for resp in r:
169
+ match = find_match(self.regex, resp)
170
+ if not match:
171
+ match = find_match(
172
+ fallback_regex, filter_ignores(resp), choice_to_alpha
173
+ )
174
+ if not match:
175
+ match = find_match(
176
+ without_paren_fallback_regex, resp, without_paren_to_target
177
+ )
178
+ if not match:
179
+ match = self.fallback
180
+ filtered.append(match)
181
+ filtered_resps.append(filtered)
182
+
183
+ return filtered_resps
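A small sketch of how RegexFilter consumes and returns the nested per-document response lists described in its `apply` comments; the example strings are made up.

from lm_eval.filters.extraction import RegexFilter

f = RegexFilter()  # default pattern captures "#### <number>"
resps = [["The total is 12.\n#### 12"], ["no final answer given"]]
print(f.apply(resps, docs=None))  # [['12'], ['[invalid]']]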
lm-evaluation-harness/lm_eval/filters/selection.py ADDED
@@ -0,0 +1,52 @@
+ from collections import Counter
+
+ from lm_eval.api.filter import Filter
+
+
+ class TakeFirstFilter(Filter):
+     def __init__(self) -> None:
+         """
+         Can define custom behavior here, if an individual instantiation of a Filter class should have state.
+         """
+
+     def apply(self, resps, docs):
+         """
+         Assuming each entry of `resps` is a list of model responses, we discard all but the first response.
+         """
+         return map(lambda r: r[0], resps)
+
+
+ class TakeKFilter(Filter):
+     def __init__(self, **kwargs) -> None:
+         self.k = kwargs.pop("k")
+
+         super().__init__(**kwargs)
+
+     def apply(self, resps, docs):
+         # need resp to be subscriptable to check below
+         resps = list(resps)
+         # check we have at least k responses per doc, else we can't take the first k
+         assert (
+             len(resps[0]) >= self.k
+         ), f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats ."
+         return map(lambda r: r[: self.k], resps)
+
+
+ class MajorityVoteFilter(Filter):
+     def __init__(self) -> None:
+         """
+         Can define custom behavior here, if an individual instantiation of a Filter class should have state.
+         """
+
+     def apply(self, resps, docs):
+         """
+         Each entry of `resps` is a list of model responses.
+         We select the response that occurs most frequently in each entry of `resps`.
+         """
+
+         def select_majority(resp):
+             counts = Counter(resp)
+             vote = counts.most_common(1)[0][0]
+             return vote
+
+         return map(lambda r: [select_majority(r)], resps)
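
A short sketch of how these selectors behave (not part of the upstream file; the response values are made up, and `None` is passed for `docs` since neither filter uses it). Both `apply` methods return lazy `map` objects, so they are wrapped in `list` here:

from lm_eval.filters.selection import MajorityVoteFilter, TakeKFilter

resps = [["4", "5", "4", "4"], ["yes", "no", "yes"]]

print(list(TakeKFilter(k=2).apply(resps, None)))      # [['4', '5'], ['yes', 'no']]
print(list(MajorityVoteFilter().apply(resps, None)))  # [['4'], ['yes']]
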
lm-evaluation-harness/lm_eval/filters/transformation.py ADDED
@@ -0,0 +1,52 @@
+ from lm_eval.api.filter import Filter
+
+
+ class LowercaseFilter(Filter):
+     def __init__(self) -> None:
+         pass
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [resp.lower() for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
+
+
+ class UppercaseFilter(Filter):
+     def __init__(self) -> None:
+         pass
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [resp.upper() for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
+
+
+ class MapFilter(Filter):
+     def __init__(self, mapping_dict: dict = None, default_value=None) -> None:
+         """
+         Initializes the MapFilter with a given mapping dictionary and default value.
+
+         Args:
+         - mapping_dict (dict): A dictionary containing the key-value mappings.
+                                Default is an empty dictionary.
+         - default_value (Any): The value to be returned when a key is not found in the mapping_dict.
+                                Default is None.
+
+         Example:
+         mapper = MapFilter({'A': 1, 'B': 2}, default_value=0)
+         """
+         if mapping_dict is None:
+             mapping_dict = {}
+         assert isinstance(
+             mapping_dict, dict
+         ), "Provided mapping_dict is not a dictionary"
+         self.mapping_dict = mapping_dict
+         self.default_value = default_value
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [self.mapping_dict.get(resp, self.default_value) for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
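
A small sketch chaining these transforms (not part of the upstream file; the responses and mapping are illustrative, and `docs` is unused so `None` is passed):

from lm_eval.filters.transformation import LowercaseFilter, MapFilter

resps = [["Yes", "No", "Maybe"]]

lowered = LowercaseFilter().apply(resps, None)
mapped = MapFilter({"yes": 1, "no": 0}, default_value=-1).apply(lowered, None)
print(mapped)  # [[1, 0, -1]]
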
lm-evaluation-harness/lm_eval/models/__init__.py ADDED
@@ -0,0 +1,27 @@
+ from . import (
+     anthropic_llms,
+     dummy,
+     gguf,
+     huggingface,
+     mamba_lm,
+     nemo_lm,
+     neuralmagic,
+     neuron_optimum,
+     openai_completions,
+     optimum_lm,
+     textsynth,
+     vllm_causallms,
+ )
+
+
+ # TODO: implement __all__
+
+
+ try:
+     # enable hf hub transfer if available
+     import hf_transfer  # type: ignore # noqa
+     import huggingface_hub.constants  # type: ignore
+
+     huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
+ except ImportError:
+     pass
lm-evaluation-harness/lm_eval/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (605 Bytes).
lm-evaluation-harness/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc ADDED
Binary file (10.2 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/dummy.cpython-310.pyc ADDED
Binary file (1.58 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/gguf.cpython-310.pyc ADDED
Binary file (4.1 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/huggingface.cpython-310.pyc ADDED
Binary file (26.5 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc ADDED
Binary file (3.69 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc ADDED
Binary file (13.7 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc ADDED
Binary file (11 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc ADDED
Binary file (18.3 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc ADDED
Binary file (14.3 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc ADDED
Binary file (2.01 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/textsynth.cpython-310.pyc ADDED
Binary file (5.23 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/utils.cpython-310.pyc ADDED
Binary file (20 kB).
lm-evaluation-harness/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc ADDED
Binary file (12.7 kB).
lm-evaluation-harness/lm_eval/models/anthropic_llms.py ADDED
@@ -0,0 +1,360 @@
1
+ from typing import Any, List, Tuple
2
+
3
+ from tqdm import tqdm
4
+
5
+ from lm_eval import utils
6
+ from lm_eval.api.model import LM
7
+ from lm_eval.api.registry import register_model
8
+ from lm_eval.models.utils import retry_on_specific_exceptions
9
+
10
+
11
+ eval_logger = utils.eval_logger
12
+
13
+
14
+ def anthropic_completion(
15
+ client, #: anthropic.Anthropic,
16
+ model: str,
17
+ prompt: str,
18
+ max_tokens_to_sample: int,
19
+ temperature: float,
20
+ stop: List[str],
21
+ **kwargs: Any,
22
+ ) -> str:
23
+ """Wrapper function around the Anthropic completion API client with exponential back-off
24
+ in case of RateLimitError.
25
+
26
+ params:
27
+ client: anthropic.Anthropic
28
+ Anthropic API client
29
+ model: str
30
+ Anthropic model e.g. 'claude-instant-v1', 'claude-2'
31
+ prompt: str
32
+ Prompt to feed to the model
33
+ max_tokens_to_sample: int
34
+ Maximum number of tokens to sample from the model
35
+ temperature: float
36
+ Sampling temperature
37
+ stop: List[str]
38
+ List of stop sequences
39
+ kwargs: Any
40
+ Additional model_args to pass to the API client
41
+ """
42
+
43
+ try:
44
+ import anthropic
45
+ except ModuleNotFoundError:
46
+ raise Exception(
47
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
48
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
49
+ )
50
+
51
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
52
+ eval_logger.warning(
53
+ f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
54
+ )
55
+
56
+ @retry_on_specific_exceptions(
57
+ on_exceptions=[anthropic.RateLimitError],
58
+ max_retries=None, # retry forever, consider changing
59
+ on_exception_callback=_exception_callback,
60
+ )
61
+ def completion():
62
+ response = client.completions.create(
63
+ prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}",
64
+ model=model,
65
+ # NOTE: Claude really likes to do CoT, and overly aggressive stop sequences
66
+ # (e.g. gsm8k's ":") may truncate a lot of the input.
67
+ stop_sequences=[anthropic.HUMAN_PROMPT] + stop,
68
+ max_tokens_to_sample=max_tokens_to_sample,
69
+ temperature=temperature,
70
+ **kwargs,
71
+ )
72
+ return response.completion
73
+
74
+ return completion()
75
+
76
+
77
+ def anthropic_chat(
78
+ client, #: anthropic.Anthropic,
79
+ model: str,
80
+ prompt: str,
81
+ max_tokens: int,
82
+ temperature: float,
83
+ stop: List[str],
84
+ **kwargs: Any,
85
+ ) -> str:
86
+ """Wrapper function around the Anthropic completion API client with exponential back-off
87
+ in case of RateLimitError.
88
+
89
+ params:
90
+ client: anthropic.Anthropic
91
+ Anthropic API client
92
+ model: str
93
+ Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
94
+ prompt: str
95
+ Prompt to feed to the model
96
+ max_tokens: int
97
+ Maximum number of tokens to sample from the model
98
+ temperature: float
99
+ Sampling temperature
100
+ stop: List[str]
101
+ List of stop sequences
102
+ kwargs: Any
103
+ Additional model_args to pass to the API client
104
+ """
105
+
106
+ try:
107
+ import anthropic
108
+ except ModuleNotFoundError:
109
+ raise Exception(
110
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
111
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
112
+ )
113
+
114
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
115
+ eval_logger.warning(
116
+ f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
117
+ )
118
+
119
+ @retry_on_specific_exceptions(
120
+ on_exceptions=[
121
+ anthropic.RateLimitError,
122
+ anthropic.APIConnectionError,
123
+ anthropic.APIStatusError,
124
+ ],
125
+ max_retries=None, # retry forever, consider changing
126
+ on_exception_callback=_exception_callback,
127
+ )
128
+ def messages():
129
+ response = client.messages.create(
130
+ model=model,
131
+ max_tokens=max_tokens,
132
+ temperature=temperature,
133
+ messages=[{"role": "user", "content": f"{prompt}"}],
134
+ **kwargs,
135
+ )
136
+ return response.content[0].text
137
+
138
+ return messages()
139
+
140
+
141
+ @register_model("anthropic")
142
+ class AnthropicLM(LM):
143
+ REQ_CHUNK_SIZE = 20 # TODO: not used
144
+
145
+ def __init__(
146
+ self,
147
+ batch_size: int = 1,
148
+ model: str = "claude-2.0",
149
+ max_tokens_to_sample: int = 256,
150
+ temperature: float = 0, # defaults to 1
151
+ **kwargs, # top_p, top_k, etc.
152
+ ) -> None:
153
+ """Anthropic API wrapper.
154
+
155
+ :param model: str
156
+ Anthropic model e.g. 'claude-instant-v1', 'claude-2'
157
+ :param max_tokens_to_sample: int
158
+ Maximum number of tokens to sample from the model
159
+ :param temperature: float
160
+ Sampling temperature
161
+ :param kwargs: Any
162
+ Additional model_args to pass to the API client
163
+ """
164
+ super().__init__()
165
+
166
+ try:
167
+ import anthropic
168
+ except ModuleNotFoundError:
169
+ raise Exception(
170
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
171
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
172
+ )
173
+
174
+ self.model = model
175
+ # defaults to os.environ.get("ANTHROPIC_API_KEY")
176
+ self.client = anthropic.Anthropic()
177
+ self.temperature = temperature
178
+ self.max_tokens_to_sample = max_tokens_to_sample
179
+ self.tokenizer = self.client.get_tokenizer()
180
+ self.kwargs = kwargs
181
+
182
+ @property
183
+ def eot_token_id(self):
184
+ # Not sure but anthropic.HUMAN_PROMPT ?
185
+ raise NotImplementedError("No idea about anthropic tokenization.")
186
+
187
+ @property
188
+ def max_length(self) -> int:
189
+ return 2048
190
+
191
+ @property
192
+ def max_gen_toks(self) -> int:
193
+ return self.max_tokens_to_sample
194
+
195
+ @property
196
+ def batch_size(self):
197
+ # Isn't used because we override _loglikelihood_tokens
198
+ raise NotImplementedError("No support for logits.")
199
+
200
+ @property
201
+ def device(self):
202
+ # Isn't used because we override _loglikelihood_tokens
203
+ raise NotImplementedError("No support for logits.")
204
+
205
+ def tok_encode(self, string: str) -> List[int]:
206
+ return self.tokenizer.encode(string).ids
207
+
208
+ def tok_decode(self, tokens: List[int]) -> str:
209
+ return self.tokenizer.decode(tokens)
210
+
211
+ def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False):
212
+ raise NotImplementedError("No support for logits.")
213
+
214
+ def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
215
+ try:
216
+ import anthropic
217
+ except ModuleNotFoundError:
218
+ raise Exception(
219
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
220
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
221
+ )
222
+
223
+ if not requests:
224
+ return []
225
+
226
+ _requests: List[Tuple[str, dict]] = [req.args for req in requests]
227
+
228
+ res = []
229
+ for request in tqdm(_requests, disable=disable_tqdm):
230
+ try:
231
+ inp = request[0]
232
+ request_args = request[1]
233
+ # generation_kwargs
234
+ until = request_args.get("until")
235
+ max_gen_toks = request_args.get("max_gen_toks", self.max_length)
236
+ temperature = request_args.get("temperature", self.temperature)
237
+ response = anthropic_completion(
238
+ client=self.client,
239
+ model=self.model,
240
+ prompt=inp,
241
+ max_tokens_to_sample=max_gen_toks,
242
+ temperature=temperature, # TODO: implement non-greedy sampling for Anthropic
243
+ stop=until, # type: ignore
244
+ **self.kwargs,
245
+ )
246
+ res.append(response)
247
+
248
+ self.cache_hook.add_partial("generate_until", request, response)
249
+ except anthropic.APIConnectionError as e: # type: ignore # noqa: F821
250
+ eval_logger.critical(f"Server unreachable: {e.__cause__}")
251
+ break
252
+ except anthropic.APIStatusError as e: # type: ignore # noqa: F821
253
+ eval_logger.critical(f"API error {e.status_code}: {e.message}")
254
+ break
255
+
256
+ return res
257
+
258
+ def _model_call(self, inps):
259
+ # Isn't used because we override _loglikelihood_tokens
260
+ raise NotImplementedError()
261
+
262
+ def _model_generate(self, context, max_length, eos_token_id):
263
+ # Isn't used because we override generate_until
264
+ raise NotImplementedError()
265
+
266
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
267
+ raise NotImplementedError("No support for logits.")
268
+
269
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
270
+ raise NotImplementedError("No support for logits.")
271
+
272
+
273
+ @register_model("anthropic-chat", "anthropic-chat-completions")
274
+ class AnthropicChatLM(AnthropicLM):
275
+ REQ_CHUNK_SIZE = 20 # TODO: not used
276
+
277
+ def __init__(
278
+ self,
279
+ model: str,
280
+ batch_size: int = 1,
281
+ max_tokens: int = 256,
282
+ temperature: float = 0, # defaults to 1
283
+ **kwargs, # top_p, top_k, etc.
284
+ ) -> None:
285
+ """Anthropic API wrapper.
286
+
287
+ :param model: str
288
+ Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
289
+ :param max_tokens: int
290
+ Maximum number of tokens to sample from the model
291
+ :param temperature: float
292
+ Sampling temperature
293
+ :param kwargs: Any
294
+ Additional model_args to pass to the API client
295
+ """
296
+ super().__init__()
297
+
298
+ try:
299
+ import anthropic
300
+ except ModuleNotFoundError:
301
+ raise Exception(
302
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
303
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
304
+ )
305
+
306
+ self.model = model
307
+ # defaults to os.environ.get("ANTHROPIC_API_KEY")
308
+ self.client = anthropic.Anthropic()
309
+ self.temperature = temperature
310
+ self.max_tokens = max_tokens
311
+ self.tokenizer = self.client.get_tokenizer()
312
+ self.kwargs = kwargs
313
+
314
+ @property
315
+ def max_gen_toks(self) -> int:
316
+ return self.max_tokens
317
+
318
+ def generate_until(self, requests) -> List[str]:
319
+ try:
320
+ import anthropic
321
+ except ModuleNotFoundError:
322
+ raise Exception(
323
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
324
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
325
+ )
326
+
327
+ if not requests:
328
+ return []
329
+
330
+ _requests: List[Tuple[str, dict]] = [req.args for req in requests]
331
+
332
+ res = []
333
+ for request in tqdm(_requests):
334
+ try:
335
+ inp = request[0]
336
+ request_args = request[1]
337
+ # generation_kwargs
338
+ until = request_args.get("until")
339
+ max_tokens = request_args.get("max_gen_toks", self.max_length)
340
+ temperature = request_args.get("temperature", self.temperature)
341
+ response = anthropic_chat(
342
+ client=self.client,
343
+ model=self.model,
344
+ prompt=inp,
345
+ max_tokens=max_tokens,
346
+ temperature=temperature, # TODO: implement non-greedy sampling for Anthropic
347
+ stop=until, # type: ignore
348
+ **self.kwargs,
349
+ )
350
+ res.append(response)
351
+
352
+ self.cache_hook.add_partial("generate_until", request, response)
353
+ except anthropic.APIConnectionError as e: # type: ignore # noqa: F821
354
+ eval_logger.critical(f"Server unreachable: {e.__cause__}")
355
+ break
356
+ except anthropic.APIStatusError as e: # type: ignore # noqa: F821
357
+ eval_logger.critical(f"API error {e.status_code}: {e.message}")
358
+ break
359
+
360
+ return res
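
A hedged end-to-end sketch (not part of the diff): the two wrappers above are reachable through the registry names given to `@register_model`, so a run can be driven entirely by strings via `lm_eval.simple_evaluate`. This assumes the `anthropic` extra is installed, `ANTHROPIC_API_KEY` is set in the environment, and the task/limit values are only a smoke test.

import lm_eval

# model_args mirrors AnthropicLM.__init__; the values here are illustrative.
results = lm_eval.simple_evaluate(
    model="anthropic",
    model_args="model=claude-2.0,max_tokens_to_sample=256",
    tasks=["gsm8k"],
    limit=8,  # small smoke test, not a full evaluation
)
print(results["results"])
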
lm-evaluation-harness/lm_eval/models/dummy.py ADDED
@@ -0,0 +1,41 @@
+ import random
+
+ from tqdm import tqdm
+
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+
+
+ @register_model("dummy")
+ class DummyLM(LM):
+     def __init__(self) -> None:
+         super().__init__()
+
+     @classmethod
+     def create_from_arg_string(cls, arg_string, additional_config=None):
+         return cls()
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for _ in tqdm(requests, disable=disable_tqdm):
+             res.append((-random.random(), False))
+
+         return res
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for ctx, _ in tqdm(requests, disable=disable_tqdm):
+             res.append("lol")
+             assert ctx.strip() != ""
+
+         return res
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for _ in tqdm(requests, disable=disable_tqdm):
+             res.append(-random.random())
+
+         return res
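
A quick sketch of how registration is consumed (not part of the upstream file): importing `lm_eval.models` runs the `@register_model` decorators above, after which a class can be looked up by name. This assumes the registry module exposes `get_model` for that lookup.

import lm_eval.models  # noqa: F401  # side effect: populates the model registry
from lm_eval.api.registry import get_model

DummyLM = get_model("dummy")              # resolves the registered class by name
lm = DummyLM.create_from_arg_string("", None)
print(lm.loglikelihood([]))               # [] here; with real requests it returns random scores
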
lm-evaluation-harness/lm_eval/models/gguf.py ADDED
@@ -0,0 +1,130 @@
+ import logging
+ import time
+
+ import requests
+ from requests.exceptions import RequestException
+ from tqdm import tqdm
+
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def get_result(logprobs, context_length):
+     is_greedy = True
+     offsets = logprobs["text_offset"]
+     tokens = logprobs["tokens"]
+     tokens_logprobs = logprobs["token_logprobs"]
+
+     idx = 0
+     while offsets[idx] < context_length:
+         idx += 1
+     continuation_logprobs = sum(tokens_logprobs[idx:-1])
+     for i in range(idx, len(tokens)):
+         token = tokens[i]
+         top_tokens = logprobs["top_logprobs"][i]
+         top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
+         if top_token != token:
+             is_greedy = False
+             break
+
+     return continuation_logprobs, is_greedy
+
+
+ @register_model("gguf", "ggml")
+ class GGUFLM(LM):
+     def __init__(self, base_url=None, max_length=2048, **kwargs):
+         super().__init__()
+         self.base_url = base_url
+         assert self.base_url, "must pass `base_url` to use GGUF LM!"
+         self.logprobs = 10
+         self.temperature = 0.0
+         self.max_length = max_length
+
+     def gguf_completion(
+         self, context, continuation=None, stop=None, retries=3, delay=5, **kwargs
+     ):
+         for _ in range(retries):
+             try:
+                 prompt = context
+                 request = {
+                     "prompt": prompt,
+                     "logprobs": self.logprobs,
+                     "temperature": self.temperature,
+                 }
+                 if continuation:
+                     prompt += continuation
+                     request.update({"prompt": prompt, "max_tokens": 1, "echo": True})
+                 if stop is not None:
+                     request["stop"] = stop
+                 response = requests.post(
+                     f"{self.base_url}/v1/completions", json=request
+                 )
+                 response.raise_for_status()
+                 return response.json()
+             except RequestException as e:
+                 logger.error(f"RequestException: {e}")
+                 time.sleep(delay)  # wait before retrying
+         else:
+             raise Exception(f"Failed to get a valid response after {retries} retries.")
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         if not requests:
+             return []
+         res = []
+         for context, continuation in tqdm(
+             [req.args for req in requests], disable=disable_tqdm
+         ):
+             response = self.gguf_completion(context=context, continuation=continuation)
+             if response and "choices" in response and response["choices"]:
+                 choice = response["choices"][0]
+                 logprobs = choice.get("logprobs")
+                 if (
+                     logprobs
+                     and "token_logprobs" in logprobs
+                     and logprobs["token_logprobs"]
+                 ):
+                     logprob, is_greedy = get_result(logprobs, len(context))
+                     res.append((logprob, is_greedy))
+                 else:
+                     logger.warning(
+                         "Invalid logprobs data. Expected 'logprobs' to contain 'token_logprobs' list."
+                     )
+             else:
+                 logger.error(
+                     f"Invalid response for loglikelihood. Response: {response}"
+                 )
+                 assert False
+         return res
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         if not requests:
+             return []
+
+         res = []
+         for request in tqdm([req.args for req in requests], disable=disable_tqdm):
+             inp = request[0]
+             request_args = request[1]
+             until = request_args.get("until", ["</s>"])
+             response = self.gguf_completion(context=inp, stop=until)
+             if response and "choices" in response and response["choices"]:
+                 choice = response["choices"][0]
+                 if "text" in choice:
+                     generated_text = choice["text"].strip()
+                     res.append(generated_text)
+                 else:
+                     logger.error(
+                         f"Invalid response for greedy_until. Response: {response}"
+                     )
+                     res.append(None)  # Add default value in case of error
+             else:
+                 logger.error(f"Invalid response for greedy_until. Response: {response}")
+                 res.append(None)  # Add default value in case of error
+         return res
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError(
+             "loglikelihood_rolling not yet supported for GGUF models"
+         )
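
A worked example of `get_result` above (not part of the upstream file; all numbers are made up) showing how the continuation log-probability and greedy flag are derived from an echoed completion payload:

from lm_eval.models.gguf import get_result

logprobs = {
    "text_offset": [0, 4, 8],
    "tokens": ["The ", "sky ", "is"],
    "token_logprobs": [-0.1, -0.5, -0.2],
    "top_logprobs": [{"The ": -0.1}, {"sky ": -0.5}, {"was": -0.1, "is": -0.2}],
}

# The continuation starts at character offset 4; the last token's logprob is excluded
# by the [idx:-1] slice, and "was" outranking "is" makes the sample non-greedy.
print(get_result(logprobs, context_length=4))  # (-0.5, False)
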
lm-evaluation-harness/lm_eval/models/huggingface.py ADDED
@@ -0,0 +1,1274 @@
1
+ import copy
2
+ import os
3
+ from datetime import timedelta
4
+ from pathlib import Path
5
+ from typing import List, Literal, Optional, Tuple, Union
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ import transformers
10
+ from accelerate import (
11
+ Accelerator,
12
+ DistributedType,
13
+ InitProcessGroupKwargs,
14
+ find_executable_batch_size,
15
+ )
16
+ from packaging import version
17
+ from peft import PeftModel
18
+ from peft import __version__ as PEFT_VERSION
19
+ from tqdm import tqdm
20
+ from transformers.models.auto.modeling_auto import (
21
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
22
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
23
+ )
24
+
25
+ from lm_eval import utils
26
+ from lm_eval.api.instance import Instance
27
+ from lm_eval.api.model import TemplateLM
28
+ from lm_eval.api.registry import register_model
29
+ from lm_eval.models.utils import (
30
+ Collator,
31
+ clear_torch_cache,
32
+ get_dtype,
33
+ pad_and_concat,
34
+ stop_sequences_criteria,
35
+ )
36
+
37
+
38
+ eval_logger = utils.eval_logger
39
+
40
+
41
+ def _get_accelerate_args(
42
+ device_map_option: Optional[str] = "auto",
43
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
44
+ max_cpu_memory: Optional[Union[int, str]] = None,
45
+ offload_folder: Optional[str] = "./offload",
46
+ ) -> dict:
47
+ """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
48
+ max_memory = {}
49
+ if max_memory_per_gpu is not None:
50
+ max_memory_per_gpu_map = {
51
+ device_idx: max_memory_per_gpu
52
+ for device_idx in range(torch.cuda.device_count())
53
+ }
54
+ max_memory.update(max_memory_per_gpu_map)
55
+ if max_cpu_memory is not None:
56
+ max_memory["cpu"] = max_cpu_memory
57
+
58
+ args = {}
59
+ if max_memory:
60
+ args["max_memory"] = max_memory
61
+ args["device_map"] = device_map_option
62
+ args["offload_folder"] = offload_folder
63
+ return args
64
+
65
+
66
+ @register_model("hf-auto", "hf", "huggingface")
67
+ class HFLM(TemplateLM):
68
+ """
69
+ An abstracted Huggingface model class. Enables usage with both models of
70
+ `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes.
71
+
72
+ Supports data-parallel multi-GPU with HF Accelerate.
73
+ """
74
+
75
+ AUTO_MODEL_CLASS = None
76
+ _DEFAULT_MAX_LENGTH = 2048
77
+
78
+ def __init__(
79
+ self,
80
+ pretrained: Optional[Union[str, transformers.PreTrainedModel]] = "gpt2",
81
+ backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
82
+ # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq)
83
+ revision: Optional[str] = "main",
84
+ subfolder: Optional[str] = None,
85
+ tokenizer: Optional[
86
+ Union[
87
+ str,
88
+ transformers.PreTrainedTokenizer,
89
+ transformers.PreTrainedTokenizerFast,
90
+ ]
91
+ ] = None,
92
+ truncation: Optional[bool] = False,
93
+ logits_cache: bool = True,
94
+ max_length: Optional[int] = None,
95
+ device: Optional[str] = "cuda",
96
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
97
+ batch_size: Optional[Union[int, str]] = 1,
98
+ max_batch_size: Optional[int] = 64,
99
+ trust_remote_code: Optional[bool] = False,
100
+ use_fast_tokenizer: Optional[bool] = True,
101
+ add_bos_token: Optional[bool] = False,
102
+ prefix_token_id: Optional[int] = None,
103
+ # arguments used for splitting a model across GPUs naively.
104
+ # only used if `parallelize=True`.
105
+ parallelize: Optional[bool] = False,
106
+ device_map_option: Optional[str] = "auto",
107
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
108
+ max_cpu_memory: Optional[Union[int, str]] = None,
109
+ offload_folder: Optional[Union[str, os.PathLike]] = "./offload",
110
+ # PEFT, delta weights and quantization options
111
+ peft: Optional[str] = None,
112
+ delta: Optional[str] = None,
113
+ autogptq: Optional[Union[bool, str]] = False,
114
+ **kwargs,
115
+ ) -> None:
116
+ super().__init__()
117
+
118
+ # optionally: take in an already-initialized transformers.PreTrainedModel
119
+ if not isinstance(pretrained, str):
120
+ eval_logger.warning(
121
+ "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way."
122
+ )
123
+ assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`"
124
+ self._model = pretrained
125
+ self._device = self._model.device
126
+ self._config = self._model.config
127
+ gpus = 0
128
+
129
+ if tokenizer:
130
+ assert isinstance(
131
+ tokenizer, transformers.PreTrainedTokenizer
132
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
133
+ self.tokenizer = tokenizer
134
+ else:
135
+ # Get tokenizer
136
+ model_name = self._model.name_or_path
137
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
138
+ model_name,
139
+ revision=revision,
140
+ trust_remote_code=trust_remote_code,
141
+ use_fast=use_fast_tokenizer,
142
+ )
143
+
144
+ else:
145
+ assert isinstance(device, str)
146
+ assert isinstance(pretrained, str)
147
+ assert isinstance(batch_size, (int, str))
148
+
149
+ gpus = torch.cuda.device_count()
150
+ accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
151
+ accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
152
+ if accelerator.num_processes > 1:
153
+ self.accelerator = accelerator
154
+
155
+ if not (parallelize or accelerator.num_processes > 1):
156
+ # use user-passed device
157
+ device_list = set(
158
+ ["cuda", "cpu"]
159
+ + [f"cuda:{i}" for i in range(torch.cuda.device_count())]
160
+ + ["mps", "mps:0"]
161
+ )
162
+ if device and device in device_list:
163
+ self._device = torch.device(device)
164
+ eval_logger.info(f"Using device '{device}'")
165
+ if device in ("mps", "mps:0") and version.parse(
166
+ torch.__version__
167
+ ) < version.parse("2.1"):
168
+ raise RuntimeError(
169
+ f"mps requires torch >= 2.1. You have {torch.__version__}"
170
+ )
171
+ else:
172
+ eval_logger.info("Device not specified")
173
+ eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
174
+ self._device = (
175
+ torch.device("cuda")
176
+ if torch.cuda.is_available()
177
+ else torch.device("cpu")
178
+ )
179
+ else:
180
+ if device != "cuda":
181
+ eval_logger.info(
182
+ f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
183
+ )
184
+ # TODO: include in warning that `load_in_8bit` etc. affect this too
185
+ self._device = torch.device(device)
186
+
187
+ # TODO: update this to be less of a hack once subfolder is fixed in HF
188
+ revision = revision + ("/" + subfolder if subfolder is not None else "")
189
+
190
+ self._get_config(
191
+ pretrained,
192
+ revision=revision,
193
+ trust_remote_code=trust_remote_code,
194
+ )
195
+
196
+ # determine which of 'causal' and 'seq2seq' backends to use
197
+ self._get_backend(
198
+ config=self.config, backend=backend, trust_remote_code=trust_remote_code
199
+ )
200
+
201
+ # if we passed `pretrained` as a string, initialize our model now
202
+ if isinstance(pretrained, str):
203
+ self._create_model(
204
+ pretrained=pretrained,
205
+ revision=revision,
206
+ dtype=dtype,
207
+ trust_remote_code=trust_remote_code,
208
+ parallelize=parallelize,
209
+ device_map_option=device_map_option,
210
+ max_memory_per_gpu=max_memory_per_gpu,
211
+ max_cpu_memory=max_cpu_memory,
212
+ offload_folder=offload_folder,
213
+ peft=peft,
214
+ delta=delta,
215
+ autogptq=autogptq,
216
+ **kwargs,
217
+ )
218
+
219
+ # access self._model through self.model property outside this method
220
+ if isinstance(self.model, torch.nn.Module):
221
+ self.model.eval()
222
+ self.model.tie_weights()
223
+
224
+ if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"):
225
+ # TODO: can remove this whole snippet except in the mps case, perhaps?
226
+ if not (parallelize or autogptq or hasattr(self, "accelerator")):
227
+ # place model onto device requested manually,
228
+ # if not using HF Accelerate or device_map
229
+ # or any other option that preloads model onto device
230
+ try:
231
+ self.model.to(self.device)
232
+ except ValueError:
233
+ eval_logger.debug(
234
+ "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore."
235
+ )
236
+
237
+ self._create_tokenizer(
238
+ pretrained,
239
+ tokenizer,
240
+ revision=revision,
241
+ trust_remote_code=trust_remote_code,
242
+ use_fast_tokenizer=use_fast_tokenizer,
243
+ )
244
+
245
+ self.truncation = truncation
246
+ self.logits_cache = logits_cache
247
+ self.vocab_size = self.tokenizer.vocab_size
248
+ # select (or create) a pad token to use
249
+ if self.tokenizer.pad_token:
250
+ pass
251
+ elif self.tokenizer.unk_token:
252
+ self.tokenizer.pad_token_id = self.tokenizer.unk_token_id
253
+ elif self.tokenizer.eos_token:
254
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
255
+ else:
256
+ if getattr(self.config, "model_type", None) == "qwen":
257
+ # Qwen's trust_remote_code tokenizer does not allow for adding special tokens
258
+ self.tokenizer.pad_token = "<|endoftext|>"
259
+ elif (
260
+ self.tokenizer.__class__.__name__ == "RWKVWorldTokenizer"
261
+ or self.tokenizer.__class__.__name__ == "Rwkv5Tokenizer"
262
+ ):
263
+ # The RWKV world tokenizer, does not allow for adding special tokens / setting the pad token (which is set as 0)
264
+ # The additional tokenizer name check is needed, as there exists rwkv4 models with neox tokenizer
265
+ # ---
266
+ # Note that the world tokenizer class name, might change in the future for the final huggingface merge
267
+ # https://github.com/huggingface/transformers/pull/26963
268
+ assert self.tokenizer.pad_token_id == 0
269
+ else:
270
+ self.tokenizer.add_special_tokens({"pad_token": "<|pad|>"})
271
+
272
+ # TODO: override this for Gemma
273
+ self.add_bos_token = add_bos_token
274
+ if getattr(self.config, "model_type", None) == "gemma":
275
+ self.add_bos_token = True
276
+ eval_logger.info(
277
+ f"Model type is '{self.config.model_type}', a BOS token will be used as Gemma underperforms without it."
278
+ )
279
+
280
+ self._max_length = max_length
281
+
282
+ self.batch_schedule = 1
283
+ self.batch_sizes = {}
284
+ self.max_batch_size = max_batch_size
285
+
286
+ if str(batch_size).startswith("auto"):
287
+ batch_size = batch_size.split(":")
288
+ self.batch_size_per_gpu = batch_size[0]
289
+ self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
290
+ else:
291
+ self.batch_size_per_gpu = int(batch_size)
292
+
293
+ if isinstance(pretrained, str):
294
+ # multigpu data-parallel support when launched with accelerate
295
+ if gpus > 1:
296
+ if parallelize:
297
+ if accelerator.num_processes > 1:
298
+ raise RuntimeError(
299
+ "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher."
300
+ )
301
+ else:
302
+ pass
303
+ elif accelerator.num_processes == 1:
304
+ # if we aren't launching via accelerate, ditch
305
+ self._rank = 0
306
+ self._world_size = 1
307
+ else:
308
+ if gpus > accelerator.num_processes:
309
+ eval_logger.warning(
310
+ "WARNING: The number of total system GPUs does not match the number of spawned processes. "
311
+ "If you would like to use data parallelism, please launch the script "
312
+ "with 'accelerate launch *script*'. "
313
+ f"Current run will proceed with {accelerator.num_processes} devices."
314
+ )
315
+ assert (
316
+ accelerator.distributed_type
317
+ in [
318
+ DistributedType.FSDP,
319
+ DistributedType.MULTI_GPU,
320
+ ]
321
+ ), "Unsupported distributed type provided. Only DDP and FSDP are supported."
322
+ if accelerator.distributed_type == DistributedType.FSDP:
323
+ self._model = accelerator.prepare(self.model)
324
+ else:
325
+ self._model = accelerator.prepare_model(
326
+ self.model, evaluation_mode=True
327
+ )
328
+ self._device = torch.device(
329
+ f"cuda:{accelerator.local_process_index}"
330
+ )
331
+ self.accelerator = accelerator
332
+
333
+ if self.accelerator.is_local_main_process:
334
+ eval_logger.info(f"Using {gpus} devices with data parallelism")
335
+
336
+ self._rank = self.accelerator.local_process_index
337
+ self._world_size = self.accelerator.num_processes
338
+ else:
339
+ # if a PreTrainedModel was passed into HFLM, we forgo distributed setup.
340
+ eval_logger.warning(
341
+ "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration"
342
+ )
343
+ self._rank = 0
344
+ self._world_size = 1
345
+
346
+ self.custom_prefix_token_id = prefix_token_id
347
+ if prefix_token_id is not None:
348
+ eval_logger.info(
349
+ f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
350
+ )
351
+
352
+ @property
353
+ def config(self):
354
+ # return the associated transformers.AutoConfig for the given pretrained model.
355
+ return self._config
356
+
357
+ @property
358
+ def model(self):
359
+ # returns the model, unwrapping it if using Accelerate
360
+ if hasattr(self, "accelerator"):
361
+ return self.accelerator.unwrap_model(self._model)
362
+ else:
363
+ return self._model
364
+
365
+ @property
366
+ def eot_token_id(self):
367
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
368
+ return self.tokenizer.eos_token_id
369
+
370
+ @property
371
+ def prefix_token_id(self):
372
+ # it is used as prefix for loglikelihood
373
+ if self.custom_prefix_token_id is not None:
374
+ return self.custom_prefix_token_id
375
+ if self.tokenizer.bos_token_id is not None:
376
+ return self.tokenizer.bos_token_id
377
+ return self.tokenizer.eos_token_id
378
+
379
+ @property
380
+ def max_length(self):
381
+ if self._max_length: # if max length manually set, return it
382
+ return self._max_length
383
+ seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
384
+ for attr in seqlen_config_attrs:
385
+ if hasattr(self.model.config, attr):
386
+ return getattr(self.model.config, attr)
387
+ if hasattr(self.tokenizer, "model_max_length"):
388
+ if self.tokenizer.model_max_length == 1000000000000000019884624838656:
389
+ return self._DEFAULT_MAX_LENGTH
390
+ return self.tokenizer.model_max_length
391
+ return self._DEFAULT_MAX_LENGTH
392
+
393
+ @property
394
+ def max_gen_toks(self) -> int:
395
+ return 256
396
+
397
+ @property
398
+ def batch_size(self):
399
+ return self.batch_size_per_gpu
400
+
401
+ @property
402
+ def device(self):
403
+ return self._device
404
+
405
+ @property
406
+ def rank(self):
407
+ return self._rank
408
+
409
+ @property
410
+ def world_size(self):
411
+ return self._world_size
412
+
413
+ def _get_backend(
414
+ self,
415
+ config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
416
+ backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
417
+ trust_remote_code: Optional[bool] = False,
418
+ ) -> None:
419
+ """
420
+ Helper method during initialization.
421
+ Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder))
422
+ model type to be used.
423
+ """
424
+ assert backend in ["default", "causal", "seq2seq"]
425
+
426
+ if backend != "default":
427
+ # if we've settled on non-default backend, use that manually
428
+ if backend == "causal":
429
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
430
+ elif backend == "seq2seq":
431
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
432
+ eval_logger.info(
433
+ f"Overrode HF model backend type, and using type '{backend}'"
434
+ )
435
+ else:
436
+ # determine and use the default HF backend for this model, based on its config + metadata.
437
+ if (
438
+ getattr(config, "model_type")
439
+ in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
440
+ ):
441
+ # first check if model type is listed under seq2seq models, since some
442
+ # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
443
+ # these special cases should be treated as seq2seq models.
444
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
445
+ elif (
446
+ getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
447
+ ):
448
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
449
+ else:
450
+ if not trust_remote_code:
451
+ eval_logger.warning(
452
+ "HF model type is neither marked as CausalLM or Seq2SeqLM. \
453
+ This is expected if your model requires `trust_remote_code=True` but may be an error otherwise."
454
+ )
455
+ # if model type is neither in HF transformers causal or seq2seq model registries
456
+ # then we default to AutoModelForCausalLM
457
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
458
+
459
+ assert self.AUTO_MODEL_CLASS in [
460
+ transformers.AutoModelForCausalLM,
461
+ transformers.AutoModelForSeq2SeqLM,
462
+ ]
463
+ return None
464
+
465
+ def _get_config(
466
+ self,
467
+ pretrained: str,
468
+ revision: str = "main",
469
+ trust_remote_code: bool = False,
470
+ ) -> None:
471
+ self._config = transformers.AutoConfig.from_pretrained(
472
+ pretrained,
473
+ revision=revision,
474
+ trust_remote_code=trust_remote_code,
475
+ )
476
+
477
+ def _create_model(
478
+ self,
479
+ pretrained: str,
480
+ revision: Optional[str] = "main",
481
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
482
+ trust_remote_code: Optional[bool] = False,
483
+ # arguments used for splitting a model across GPUs naively.
484
+ # only used if `parallelize=True`.
485
+ # (accelerate naive PP (device_map) options)
486
+ parallelize: Optional[bool] = False,
487
+ device_map_option: Optional[str] = "auto",
488
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
489
+ max_cpu_memory: Optional[Union[int, str]] = None,
490
+ offload_folder: Optional[str] = "./offload",
491
+ # PEFT, delta weights and quantization options
492
+ peft: Optional[str] = None,
493
+ delta: Optional[str] = None,
494
+ autogptq: Optional[Union[bool, str]] = False,
495
+ **kwargs,
496
+ ) -> None:
497
+ """
498
+ Initializes an HF or HF-compatible PreTrainedModel from scratch
499
+ inside HFLM, using the kwargs passed into self.__init__().
500
+
501
+ Also handles functionality such as AutoGPTQ usage and PEFT wrapping.
502
+
503
+ For future similar extensions to AutoGPTQ that are not core to HF's ecosystem,
504
+ (such as PyTorch models that are nearly, but not quite, fully mirroring
505
+ HF's public interface relied on in this HFLM class)
506
+ please consider subclassing HFLM and overriding this and other methods as needed.
507
+ """
508
+
509
+ model_kwargs = kwargs if kwargs else {}
510
+
511
+ if parallelize:
512
+ model_kwargs.update(
513
+ _get_accelerate_args(
514
+ device_map_option, # TODO: phase out device_map_option?
515
+ max_memory_per_gpu,
516
+ max_cpu_memory,
517
+ offload_folder,
518
+ )
519
+ )
520
+ elif "device_map" not in model_kwargs:
521
+ # set a device_map to initialize model on the right GPU.
522
+ # this is needed because it seems that the default behavior
523
+ # for quantized models now seems to be device_map="auto"
524
+ # which breaks data-parallel mode.
525
+ if hasattr(self, "accelerator"):
526
+ model_kwargs.update(
527
+ {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}}
528
+ )
529
+ else:
530
+ model_kwargs.update({"device_map": {"": str(self.device)}})
531
+
532
+ if not autogptq:
533
+ if model_kwargs.get("load_in_4bit", None):
534
+ assert (
535
+ transformers.__version__ >= "4.30.0"
536
+ ), "load_in_4bit requires transformers >= 4.30.0"
537
+ if transformers.__version__ >= "4.30.0":
538
+ if model_kwargs.get("load_in_4bit", None):
539
+ if model_kwargs.get("bnb_4bit_compute_dtype", None):
540
+ model_kwargs["bnb_4bit_compute_dtype"] = get_dtype(
541
+ model_kwargs["bnb_4bit_compute_dtype"]
542
+ )
543
+ self._model = self.AUTO_MODEL_CLASS.from_pretrained(
544
+ pretrained,
545
+ revision=revision,
546
+ torch_dtype=get_dtype(dtype),
547
+ trust_remote_code=trust_remote_code,
548
+ **model_kwargs,
549
+ )
550
+ else:
551
+ try:
552
+ from auto_gptq import AutoGPTQForCausalLM
553
+ except ModuleNotFoundError:
554
+ raise Exception(
555
+ "Tried to load auto_gptq, but auto-gptq is not installed ",
556
+ "please install auto-gptq via pip install lm-eval[gptq] or pip install -e .[gptq]",
557
+ )
558
+
559
+ self._model = AutoGPTQForCausalLM.from_quantized(
560
+ pretrained,
561
+ trust_remote_code=trust_remote_code,
562
+ model_basename=None if autogptq is True else Path(autogptq).stem,
563
+ use_safetensors=True
564
+ if autogptq is True
565
+ else autogptq.endswith(".safetensors"),
566
+ **model_kwargs,
567
+ )
568
+
569
+ if peft and delta:
570
+ raise ValueError(
571
+ "Cannot use both 'peft' and 'delta' options at the same time."
572
+ )
573
+
574
+ if peft:
575
+ if model_kwargs.get("load_in_4bit", None):
576
+ if version.parse(PEFT_VERSION) < version.parse("0.4.0"):
577
+ raise AssertionError("load_in_4bit requires peft >= 0.4.0")
578
+ self._model = PeftModel.from_pretrained(
579
+ self._model, peft, revision=revision
580
+ )
581
+ elif delta:
582
+ if autogptq:
583
+ eval_logger.warning(
584
+ "Delta weights might trigger unexpected behavior when used with AutoGPTQ."
585
+ )
586
+ _model_delta = self.AUTO_MODEL_CLASS.from_pretrained(
587
+ delta,
588
+ revision=revision,
589
+ torch_dtype=get_dtype(dtype),
590
+ trust_remote_code=trust_remote_code,
591
+ **model_kwargs,
592
+ )
593
+ for name, param in self._model.state_dict().items():
594
+ try:
595
+ param.data += _model_delta.state_dict()[name]
596
+ except KeyError:
597
+ raise KeyError(f"Delta model is missing weights for layer: {name}")
598
+ except Exception as e:
599
+ raise RuntimeError(
600
+ f"Failed to add delta weights to layer {name}. Error: {e}"
601
+ )
602
+
603
+ del _model_delta
604
+
605
+ return None
606
+
607
+ def _create_tokenizer(
608
+ self,
609
+ pretrained: Union[str, transformers.PreTrainedModel],
610
+ tokenizer: Optional[
611
+ Union[
612
+ str,
613
+ transformers.PreTrainedTokenizer,
614
+ transformers.PreTrainedTokenizerFast,
615
+ ]
616
+ ],
617
+ revision: Optional[str] = "main",
618
+ trust_remote_code: Optional[bool] = False,
619
+ use_fast_tokenizer: Optional[bool] = True,
620
+ ) -> None:
621
+ """
622
+ Helper method during initialization.
623
+
624
+ Create a tokenizer object corresponding to the correct
625
+ tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed.
626
+ """
627
+
628
+ if tokenizer:
629
+ if isinstance(tokenizer, str):
630
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
631
+ tokenizer,
632
+ revision=revision,
633
+ trust_remote_code=trust_remote_code,
634
+ use_fast=use_fast_tokenizer,
635
+ )
636
+ else:
637
+ assert isinstance(
638
+ tokenizer, transformers.PreTrainedTokenizer
639
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
640
+ self.tokenizer = tokenizer
641
+ else:
642
+ # Get tokenizer based on 'pretrained'
643
+ if isinstance(pretrained, str):
644
+ model_name = pretrained
645
+ else:
646
+ # get the HF hub name via accessor on model
647
+ model_name = self.model.name_or_path
648
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
649
+ model_name,
650
+ revision=revision,
651
+ trust_remote_code=trust_remote_code,
652
+ use_fast=use_fast_tokenizer,
653
+ )
654
+ return None
655
+
656
+ def _detect_batch_size(self, requests=None, pos: int = 0):
657
+ if requests:
658
+ _, context_enc, continuation_enc = requests[pos]
659
+ max_length = len(
660
+ (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
661
+ )
662
+ max_context_enc = len(context_enc[-(self.max_length + 1) :])
663
+ max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
664
+ else:
665
+ max_length = self.max_length
666
+
667
+ # if OOM, then halves batch_size and tries again
668
+ @find_executable_batch_size(starting_batch_size=self.max_batch_size)
669
+ def forward_batch(batch_size):
670
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
671
+ length = max(max_context_enc, max_cont_enc)
672
+ batched_conts = torch.ones(
673
+ (batch_size, length), device=self.device
674
+ ).long()
675
+ test_batch = torch.ones((batch_size, length), device=self.device).long()
676
+ call_kwargs = {
677
+ "attn_mask": test_batch,
678
+ "labels": batched_conts,
679
+ }
680
+ else:
681
+ call_kwargs = {}
682
+ test_batch = torch.ones(
683
+ (batch_size, max_length), device=self.device
684
+ ).long()
685
+ for _ in range(5):
686
+ out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1) # noqa: F841
687
+
688
+ return batch_size
689
+
690
+ try:
691
+ batch_size = forward_batch()
692
+ except RuntimeError as e:
693
+ if "No executable batch size found" in str(e):
694
+ batch_size = 1
695
+ else:
696
+ raise
697
+
698
+ if self.world_size > 1:
699
+ # if multi-GPU, always take minimum over all selected batch sizes
700
+ max_rnk_bs = torch.tensor([batch_size], device=self.device)
701
+ gathered = (
702
+ self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
703
+ )
704
+ batch_size = min(gathered)
705
+ clear_torch_cache()
706
+ return batch_size
707
+
708
+ clear_torch_cache()
709
+ return batch_size
710
+
711
+ def tok_encode(
712
+ self, string: str, left_truncate_len=None, add_special_tokens=None
713
+ ) -> List[int]:
714
+ """ """
715
+ # default for None - empty dict, use predefined tokenizer param
716
+ # used for all models except for CausalLM or predefined value
717
+ special_tokens_kwargs = {}
718
+
719
+ # by default for CausalLM - false or self.add_bos_token is set
720
+ if add_special_tokens is None:
721
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
722
+ special_tokens_kwargs = {
723
+ "add_special_tokens": False or self.add_bos_token
724
+ }
725
+ # otherwise the method explicitly defines the value
726
+ else:
727
+ special_tokens_kwargs = {"add_special_tokens": add_special_tokens}
728
+
729
+ encoding = self.tokenizer.encode(string, **special_tokens_kwargs)
730
+
731
+ # left-truncate the encoded context to be at most `left_truncate_len` tokens long
732
+ if left_truncate_len:
733
+ encoding = encoding[-left_truncate_len:]
734
+
735
+ return encoding
736
+
737
+ def tok_batch_encode(
738
+ self,
739
+ strings: List[str],
740
+ padding_side: str = "left",
741
+ left_truncate_len: int = None,
742
+ truncation: bool = False,
743
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
744
+ # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
745
+ old_padding_side = self.tokenizer.padding_side
746
+ self.tokenizer.padding_side = padding_side
747
+
748
+ add_special_tokens = {}
749
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
750
+ add_special_tokens = {"add_special_tokens": False or self.add_bos_token}
751
+
752
+ encoding = self.tokenizer(
753
+ strings,
754
+ truncation=truncation,
755
+ padding="longest",
756
+ return_tensors="pt",
757
+ **add_special_tokens,
758
+ )
759
+ if left_truncate_len:
760
+ encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
761
+ encoding["attention_mask"] = encoding["attention_mask"][
762
+ :, -left_truncate_len:
763
+ ]
764
+ self.tokenizer.padding_side = old_padding_side
765
+
766
+ return encoding["input_ids"], encoding["attention_mask"]
767
+
768
+ def tok_decode(self, tokens, skip_special_tokens=True):
769
+ return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)
770
+
771
+ def _model_call(self, inps, attn_mask=None, labels=None):
772
+ """
773
+ :param inps: torch.Tensor
774
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
775
+ [batch, sequence_ctx]. the size of sequence may vary from call to call
776
+ :param attn_mask: torch.Tensor, optional
777
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
778
+ (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
779
+ :param labels: torch.Tensor, optional
780
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
781
+ (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
782
+ :return
783
+ A torch tensor of shape [batch, sequence, vocab] with the
784
+ logits returned from the model's decoder
785
+ """
786
+ with torch.no_grad():
787
+ if attn_mask is not None or labels is not None:
788
+ assert attn_mask is not None and labels is not None
789
+ assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
790
+ return self.model(
791
+ input_ids=inps, attention_mask=attn_mask, labels=labels
792
+ ).logits
793
+ else:
794
+ assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
795
+ return self.model(inps).logits
796
+
797
+ def _model_generate(self, context, max_length, stop, **generation_kwargs):
798
+ # temperature = 0.0 if not set
799
+ # if do_sample is false and temp==0.0:
800
+ # remove temperature, as do_sample=False takes care of this
801
+ # and we don't want a warning from HF
802
+ generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
803
+ do_sample = generation_kwargs.get("do_sample", None)
804
+
805
+ # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies
806
+ if generation_kwargs.get("temperature") == 0.0 and do_sample is None:
807
+ generation_kwargs["do_sample"] = do_sample = False
808
+
809
+ if do_sample is False and generation_kwargs.get("temperature") == 0.0:
810
+ generation_kwargs.pop("temperature")
811
+ # build stopping criteria
812
+ stopping_criteria = stop_sequences_criteria(
813
+ self.tokenizer, stop, context.shape[1], context.shape[0]
814
+ )
815
+ return self.model.generate(
816
+ input_ids=context,
817
+ max_length=max_length,
818
+ stopping_criteria=stopping_criteria,
819
+ pad_token_id=self.tokenizer.pad_token_id,
820
+ use_cache=True,
821
+ **generation_kwargs,
822
+ )
823
+
824
+ def _select_cont_toks(
825
+ self, logits: torch.Tensor, contlen: int = None, inplen: int = None
826
+ ) -> torch.Tensor:
827
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
828
+ assert (
829
+ contlen and inplen
830
+ ), "Must pass input len and cont. len to select scored logits for causal LM"
831
+ # discard right-padding.
832
+ # also discard the input/context tokens. we'll only score continuations.
833
+ logits = logits[inplen - contlen : inplen]
834
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
835
+ assert (
836
+ contlen and not inplen
837
+ ), "Selecting scored logits for Seq2SeqLM requires only cont. len"
838
+ # only discard right-padding.
839
+ # the logits input to this fn only contain decoder-side tokens.
840
+ logits = logits[:contlen]
841
+
842
+ return logits
843
+
844
+ def loglikelihood_rolling(
845
+ self, requests: List[Instance], disable_tqdm: bool = False
846
+ ) -> List[float]:
847
+ loglikelihoods = []
848
+
849
+ adaptive_batch_size = None
850
+ if self.batch_size == "auto":
851
+ # using rolling window with maximum context
852
+ print("Passed argument batch_size = auto. Detecting largest batch size")
853
+ batch_size = self._detect_batch_size()
854
+ print(f"Determined Largest batch size: {batch_size}")
855
+ adaptive_batch_size = batch_size
856
+
857
+ for (string,) in tqdm(
858
+ [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
859
+ ):
860
+ rolling_token_windows = list(
861
+ map(
862
+ utils.make_disjoint_window,
863
+ utils.get_rolling_token_windows(
864
+ token_list=self.tok_encode(string),
865
+ prefix_token=self.prefix_token_id,
866
+ max_seq_len=self.max_length,
867
+ context_len=1,
868
+ ),
869
+ )
870
+ )
871
+
872
+ # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
873
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
874
+
875
+ pad_amnt = 0
876
+ if self.world_size > 1:
877
+ # We pad out the external document-level iterator so the inner iterator doesn't hang
878
+ mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
879
+ gathered = (
880
+ self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
881
+ )
882
+
883
+ pad_amnt = max(gathered) - gathered[self.rank]
884
+ if pad_amnt > 0:
885
+ rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
886
+
887
+ string_nll = self._loglikelihood_tokens(
888
+ requests=rolling_token_windows,
889
+ disable_tqdm=True,
890
+ override_bs=adaptive_batch_size,
891
+ )
892
+
893
+ if (self.world_size > 1) and (pad_amnt > 0):
894
+ string_nll = [x[0] for x in string_nll[:-pad_amnt]]
895
+ else:
896
+ # discard is_greedy
897
+ string_nll = [x[0] for x in string_nll]
898
+
899
+ string_nll = sum(string_nll)
900
+ loglikelihoods.append(string_nll)
901
+
902
+ return loglikelihoods
903
+
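
`loglikelihood_rolling` scores an entire document by splitting its token stream into disjoint windows, scoring each window with `_loglikelihood_tokens`, and summing the per-window log-likelihoods. The sketch below illustrates the disjoint-window idea under simplified assumptions; it is not the library's `get_rolling_token_windows`/`make_disjoint_window` pair, but it shows how every token is scored exactly once with at most one token of carried-over context.

def rolling_windows(tokens, prefix_token, max_seq_len):
    # each window predicts a fresh slice of `tokens`, conditioned on the
    # prefix token (first window) or the last token of the previous window
    windows = []
    start = 0
    while start < len(tokens):
        end = min(start + max_seq_len, len(tokens))
        context = [prefix_token] if start == 0 else [tokens[start - 1]]
        windows.append((context, tokens[start:end]))
        start = end
    return windows

print(rolling_windows(list(range(10)), prefix_token=-1, max_seq_len=4))
# [([-1], [0, 1, 2, 3]), ([3], [4, 5, 6, 7]), ([7], [8, 9])]
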
904
+ def _batch_scheduler(self, pos, n_reordered_requests):
905
+ sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
906
+ if sched in self.batch_sizes:
907
+ return self.batch_sizes[sched]
908
+ if (len(self.batch_sizes) > 1) and (
909
+ self.batch_sizes[sched - 1] == self.max_batch_size
910
+ ):
911
+ # if previous batch size is already maximal, skip recomputation
912
+ self.batch_sizes[sched] = self.max_batch_size
913
+ return self.batch_sizes[sched]
914
+ print(
915
+ f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
916
+ )
917
+ self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
918
+ print(f"Determined largest batch size: {self.batch_sizes[sched]}")
919
+ return self.batch_sizes[sched]
920
+
921
+ def _loglikelihood_tokens(
922
+ self,
923
+ requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
924
+ disable_tqdm: bool = False,
925
+ override_bs: int = None,
926
+ ) -> List[Tuple[float, bool]]:
927
+ # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
928
+ res = []
929
+
930
+ def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
931
+ """Defines the key for the sorted method"""
932
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
933
+ # - time estimates will always be over not underestimates, which is more useful for planning
934
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
935
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
936
+ # automatic adaptive batches much much easier to implement
937
+ # - any OOMs will happen right away rather than near the end
938
+
939
+ toks = req[1] + req[2]
940
+ return -len(toks), tuple(toks)
941
+
942
+ def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
943
+ """Defines the key to group and lookup one-token continuations"""
944
+ # Use with group_by="contexts" (optional)
945
+ # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations.
946
+ # speeds up some multiple-choice tasks proportionally to the number of choices.
947
+ # groups requests by context+continuation[:-1] and infer on one request/group.
948
+ return req[-2] + req[-1][:-1]
949
+
950
+ re_ord = Collator(
951
+ requests,
952
+ sort_fn=_collate,
953
+ group_by="contexts"
954
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
955
+ and self.logits_cache
956
+ else None,
957
+ group_fn=_lookup_one_token_cont,
958
+ )
959
+
960
+ # automatic (variable) batch size detection for vectorization
961
+ # pull longest context sample from request
962
+ n_reordered_requests = len(re_ord)
963
+ batch_size = (
964
+ self.batch_size
965
+ if self.batch_size != "auto"
966
+ else override_bs
967
+ if override_bs is not None
968
+ else 0
969
+ )
970
+ batch_fn = (
971
+ self._batch_scheduler
972
+ if self.batch_size == "auto"
973
+ and n_reordered_requests > 0
974
+ and not override_bs
975
+ else None
976
+ )
977
+
978
+ chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
979
+ pbar = tqdm(
980
+ total=len(requests),
981
+ disable=(disable_tqdm or (self.rank != 0)),
982
+ desc="Running loglikelihood requests",
983
+ )
984
+ for chunk in chunks:
985
+ inps = []
986
+ cont_toks_list = []
987
+ inplens = []
988
+
989
+ conts = []
990
+ encoder_attns = []
991
+
992
+ padding_len_inp = None
993
+ padding_len_cont = None
994
+ # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
995
+ # tensors, then we pack them together into a batch, call the model, and then pick it all apart
996
+ # again because vectorizing is annoying
997
+
998
+ for _, context_enc, continuation_enc in chunk:
999
+ # sanity check
1000
+ assert len(context_enc) > 0
1001
+ assert len(continuation_enc) > 0
1002
+ assert len(continuation_enc) <= self.max_length
1003
+
1004
+ # how this all works (illustrated on a causal decoder-only setup):
1005
+ # CTX CONT
1006
+ # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
1007
+ # model \ \
1008
+ # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
1009
+ # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
1010
+
1011
+ # when too long to fit in context, truncate from the left
1012
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1013
+ inp = torch.tensor(
1014
+ (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
1015
+ dtype=torch.long,
1016
+ device=self.device,
1017
+ )
1018
+ (inplen,) = inp.shape
1019
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
1020
+ inp = torch.tensor(
1021
+ (context_enc)[-self.max_length :],
1022
+ dtype=torch.long,
1023
+ device=self.device,
1024
+ )
1025
+ (inplen,) = inp.shape
1026
+
1027
+ # build encoder attn masks
1028
+ encoder_attns.append(torch.ones_like(inp))
1029
+
1030
+ cont = torch.tensor(
1031
+ (continuation_enc)[-self.max_length :],
1032
+ # TODO: left-shift these?
1033
+ # TODO: our code assumes we never end up truncating conts for either model type
1034
+ dtype=torch.long,
1035
+ device=self.device,
1036
+ )
1037
+ (contlen,) = cont.shape
1038
+
1039
+ conts.append(cont)
1040
+
1041
+ padding_len_cont = (
1042
+ max(padding_len_cont, contlen)
1043
+ if padding_len_cont is not None
1044
+ else contlen
1045
+ )
1046
+
1047
+ padding_len_inp = (
1048
+ max(padding_len_inp, inplen)
1049
+ if padding_len_inp is not None
1050
+ else inplen
1051
+ )
1052
+
1053
+ inps.append(inp) # [1, inp_length]
1054
+ cont_toks_list.append(continuation_enc)
1055
+ inplens.append(inplen)
1056
+
1057
+ # create encoder attn mask and batched conts, if seq2seq
1058
+ call_kwargs = {}
1059
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1060
+ batched_inps = pad_and_concat(
1061
+ padding_len_inp, inps, padding_side="right"
1062
+ ) # [batch, padding_len_inp]
1063
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
1064
+ # TODO: left-pad encoder inps and mask?
1065
+ batched_inps = pad_and_concat(
1066
+ padding_len_inp, inps
1067
+ ) # [batch, padding_len_inp]
1068
+ batched_conts = pad_and_concat(
1069
+ padding_len_cont, conts
1070
+ ) # [batch, padding_len_cont]
1071
+ batched_encoder_mask = pad_and_concat(
1072
+ padding_len_inp, encoder_attns
1073
+ ) # [batch, padding_len_inp]
1074
+ call_kwargs = {
1075
+ "attn_mask": batched_encoder_mask,
1076
+ "labels": batched_conts,
1077
+ }
1078
+
1079
+ multi_logits = F.log_softmax(
1080
+ self._model_call(batched_inps, **call_kwargs), dim=-1
1081
+ ) # [batch, padding_length (inp or cont), vocab]
1082
+
1083
+ for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
1084
+ chunk, multi_logits, inplens, cont_toks_list
1085
+ ):
1086
+ # Slice to original seq length
1087
+ contlen = len(cont_toks)
1088
+ # take only logits in the continuation
1089
+ # (discard context toks if decoder-only ; discard right-padding)
1090
+ # also discards + checks for "virtual tokens" in the causal LM's input window
1091
+ # from prompt/prefix tuning tokens, if applicable
1092
+ ctx_len = (
1093
+ inplen + (logits.shape[0] - padding_len_inp)
1094
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
1095
+ else None
1096
+ )
1097
+ logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
1098
+ logits = logits.unsqueeze(0) # [1, seq, vocab]
1099
+
1100
+ # Check if per-token argmax is exactly equal to continuation
1101
+ greedy_tokens = logits.argmax(dim=-1)
1102
+
1103
+ # check for one-token continuation cache hits.
1104
+ # noop in case group_by != "contexts" or no cache hit and returns the
1105
+ # original args. Otherwise, expands the logits batch dimension and yields each
1106
+ # batch along with matching continuation tokens and prompt strings.
1107
+ # logits -> [1, seq, vocab]
1108
+ for request_str, cont_toks, logits in re_ord.get_cache(
1109
+ req_str=request_str,
1110
+ cxt_toks=ctx_tokens,
1111
+ cont_toks=cont_toks,
1112
+ logits=logits,
1113
+ ):
1114
+ cont_toks = torch.tensor(
1115
+ cont_toks, dtype=torch.long, device=self.device
1116
+ ).unsqueeze(0) # [1, seq]
1117
+ max_equal = (greedy_tokens == cont_toks).all()
1118
+
1119
+ # Obtain log-probs at the corresponding continuation token indices
1120
+ # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
1121
+ logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
1122
+ -1
1123
+ ) # [1, seq]
1124
+
1125
+ # Answer: (log prob, is-exact-match)
1126
+ answer = (float(logits.sum()), bool(max_equal))
1127
+
1128
+ res.append(answer)
1129
+
1130
+ self.cache_hook.add_partial("loglikelihood", request_str, answer)
1131
+ pbar.update(1)
1132
+
1133
+ pbar.close()
1134
+
1135
+ return re_ord.get_original(res)
1136
+
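
The continuation scoring above follows the "CTX | CONT" diagram: the model is run on the concatenated context-plus-continuation with the last token dropped, only the positions that predict continuation tokens are kept, the per-token log-probs are gathered and summed, and the argmax is compared against the expected tokens to decide the exact-match flag. A self-contained sketch of that arithmetic, with random logits standing in for a real model:

import torch
import torch.nn.functional as F

vocab, ctx, cont = 50, [5, 6, 7], [8, 9]
inp = torch.tensor(ctx + cont)[:-1]                     # drop the last target token
logits = torch.randn(len(inp), vocab)                   # stand-in for model(inp).logits
logprobs = F.log_softmax(logits, dim=-1)[-len(cont):]   # keep rows that predict cont
cont_toks = torch.tensor(cont)
is_greedy = bool((logprobs.argmax(dim=-1) == cont_toks).all())
cont_logprob = float(logprobs.gather(1, cont_toks.unsqueeze(-1)).sum())
print(cont_logprob, is_greedy)                          # (log prob, is-exact-match)
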
1137
+ def generate_until(
1138
+ self, requests: List[Instance], disable_tqdm: bool = False
1139
+ ) -> List[str]:
1140
+ res = []
1141
+
1142
+ def _collate(req: Tuple[str, dict]):
1143
+ """Defines the key for the sorted method"""
1144
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
1145
+ # - time estimates will always be over not underestimates, which is more useful for planning
1146
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
1147
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
1148
+ # automatic adaptive batches much much easier to implement
1149
+ # - any OOMs will happen right away rather than near the end
1150
+ toks = self.tok_encode(req[0])
1151
+ return -len(toks), req[0]
1152
+
1153
+ pbar = tqdm(
1154
+ total=len(requests),
1155
+ disable=(disable_tqdm or (self.rank != 0)),
1156
+ desc="Running generate_until requests",
1157
+ )
1158
+ adaptive_batch_size = None
1159
+ if self.batch_size == "auto":
1160
+ # using rolling window with maximum context
1161
+ print("Passed argument batch_size = auto. Detecting largest batch size")
1162
+ batch_size = self._detect_batch_size()
1163
+ print(f"Determined Largest batch size: {batch_size}")
1164
+ adaptive_batch_size = batch_size
1165
+ # for each different set of kwargs, we execute all requests, by batch.
1166
+ batch_size = (
1167
+ self.batch_size
1168
+ if self.batch_size != "auto"
1169
+ else adaptive_batch_size
1170
+ if adaptive_batch_size is not None
1171
+ else 0
1172
+ )
1173
+ batch_fn = (
1174
+ self._batch_scheduler
1175
+ if self.batch_size == "auto" and not adaptive_batch_size
1176
+ else None
1177
+ )
1178
+
1179
+ # we group requests by their generation_kwargs,
1180
+ # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
1181
+ # in the same batch.
1182
+ # group_fn=lambda x: x[1] -> x=(context, gen_kwargs)
1183
+ re_ords = Collator(
1184
+ [reg.args for reg in requests],
1185
+ sort_fn=_collate,
1186
+ group_by="gen_kwargs",
1187
+ group_fn=lambda x: x[1],
1188
+ )
1189
+ chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
1190
+ for chunk in chunks:
1191
+ contexts, all_gen_kwargs = zip(*chunk)
1192
+ # we assume all gen kwargs in the batch are the same
1193
+ # this is safe to assume because the `grouper` object ensures it.
1194
+ gen_kwargs = all_gen_kwargs[0]
1195
+ # unpack our keyword arguments.
1196
+ until = None
1197
+ if isinstance(gen_kwargs, dict):
1198
+ kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1
1199
+ if "until" in kwargs.keys():
1200
+ until = kwargs.pop("until")
1201
+ if isinstance(until, str):
1202
+ until = [until]
1203
+ elif not isinstance(until, list):
1204
+ raise ValueError(
1205
+ f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
1206
+ )
1207
+ else:
1208
+ raise ValueError(
1209
+ f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
1210
+ )
1211
+ # add EOS token to stop sequences
1212
+ eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False)
1213
+ if not until:
1214
+ until = [eos]
1215
+ else:
1216
+ until.append(eos)
1217
+ if "max_gen_toks" in kwargs.keys():
1218
+ max_gen_toks = kwargs.pop("max_gen_toks")
1219
+ else:
1220
+ max_gen_toks = self.max_gen_toks
1221
+
1222
+ # set the max length in tokens of inputs ("context_enc")
1223
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1224
+ # max len for inputs = max length, minus room to generate the max new tokens
1225
+ max_ctx_len = self.max_length - max_gen_toks
1226
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
1227
+ # max len for inputs = encoder's whole max_length
1228
+ max_ctx_len = self.max_length
1229
+
1230
+ # encode, pad, and truncate contexts for this batch
1231
+ context_enc, attn_masks = self.tok_batch_encode(
1232
+ contexts,
1233
+ left_truncate_len=max_ctx_len,
1234
+ truncation=self.truncation,
1235
+ )
1236
+ context_enc = context_enc.to(self.device)
1237
+ attn_masks = attn_masks.to(self.device)
1238
+
1239
+ if "max_length" not in kwargs:
1240
+ kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
1241
+
1242
+ # perform batched generation
1243
+ cont = self._model_generate(
1244
+ context=context_enc,
1245
+ attention_mask=attn_masks,
1246
+ stop=until,
1247
+ **kwargs,
1248
+ )
1249
+
1250
+ cont_toks_list = cont.tolist()
1251
+ for cont_toks, context in zip(cont_toks_list, contexts):
1252
+ # discard context + left-padding toks if using causal decoder-only LM
1253
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1254
+ cont_toks = cont_toks[context_enc.shape[1] :]
1255
+
1256
+ s = self.tok_decode(cont_toks)
1257
+
1258
+ # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
1259
+ for term in until:
1260
+ if len(term) > 0:
1261
+ # ignore '' separator,
1262
+ # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
1263
+ s = s.split(term)[0]
1264
+
1265
+ res.append(s)
1266
+
1267
+ self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s)
1268
+ pbar.update(1)
1269
+ # reorder this group of results back to original unsorted form
1270
+ res = re_ords.get_original(res)
1271
+
1272
+ pbar.close()
1273
+
1274
+ return res
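
`generate_until` groups requests by their generation kwargs so that, for example, greedy requests and temperature-0.8 requests never share the same `generate()` call; that is what `Collator(..., group_by="gen_kwargs", group_fn=lambda x: x[1])` accomplishes. A minimal illustration of the grouping idea (a hypothetical helper, not the harness's Collator):

from collections import defaultdict

def group_by_gen_kwargs(requests):
    # requests: list of (context, gen_kwargs) pairs, as in `req.args`
    groups = defaultdict(list)
    for context, gen_kwargs in requests:
        key = repr(sorted(gen_kwargs.items()))  # identical kwargs -> same bucket
        groups[key].append((context, gen_kwargs))
    return list(groups.values())

reqs = [("a", {"temperature": 0.8}), ("b", {}), ("c", {"temperature": 0.8})]
print([len(g) for g in group_by_gen_kwargs(reqs)])  # [2, 1]
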
lm-evaluation-harness/lm_eval/models/mamba_lm.py ADDED
@@ -0,0 +1,126 @@
1
+ from typing import Optional, Union
2
+
3
+ import torch
4
+
5
+ import lm_eval.models.utils
6
+ from lm_eval.api.registry import register_model
7
+ from lm_eval.models.huggingface import HFLM
8
+
9
+
10
+ @register_model("mamba_ssm")
11
+ class MambaLMWrapper(HFLM):
12
+ def __init__(
13
+ self,
14
+ pretrained="state-spaces/mamba-130m",
15
+ **kwargs,
16
+ ) -> None:
17
+ """
18
+ Mamba (via the `mamba_ssm` package) supports the following args:
19
+ ```
20
+ d_model: int,
21
+ n_layer: int,
22
+ vocab_size: int,
23
+ initializer_cfg=None,
24
+ pad_vocab_size_multiple: int = 1,
25
+ ssm_cfg=None,
26
+ norm_epsilon: float = 1e-5,
27
+ rms_norm: bool = False,
29
+ fused_add_norm=False,
30
+ residual_in_fp32=False,
31
+ ```
32
+
33
+ See https://github.com/state-spaces/mamba/blob/main/mamba_ssm/models/mixer_seq_simple.py#L175 for more info.
34
+ The above can all be passed via `--model_args` or to this __init__() directly
35
+ but we recommend placing many of these within the config.json file uploaded alongside your
36
+ Mamba model to the HF Hub instead.
37
+ All other HuggingFace from_pretrained() kwargs
38
+ such as those related to
39
+ `parallelize=True`, PEFT, autoGPTQ,
40
+ or any sub-configurations of these advanced args,
41
+ are unsupported by the `mamba_ssm` package.
42
+
43
+ The HFLM arguments
44
+
45
+ `backend`, `tokenizer`, `truncation`, `max_length`,
46
+ `device`, `dtype`, `batch_size`, `max_batch_size`, `trust_remote_code`, `use_fast_tokenizer`
47
+
48
+ Are all supported by Mamba where they do not conflict
49
+ with Mamba-specific restrictions such as causal LMs only.
50
+ """
51
+
52
+ if "backend" in kwargs:
53
+ # mamba currently only supports causal models
54
+ assert kwargs["backend"] == "causal"
55
+
56
+ super().__init__(
57
+ pretrained=pretrained,
58
+ # set appropriate defaults for tokenizer, max length, etc
59
+ backend=kwargs.pop("backend", "causal"),
60
+ tokenizer=kwargs.pop("tokenizer", "EleutherAI/gpt-neox-20b"),
61
+ max_length=kwargs.pop("max_length", 2048),
62
+ **kwargs,
63
+ )
64
+
65
+ def _get_config(
66
+ self,
67
+ pretrained: str,
68
+ **kwargs,
69
+ ) -> None:
70
+ try:
71
+ from mamba_ssm.utils.hf import load_config_hf # noqa: F811
72
+ except ModuleNotFoundError:
73
+ raise Exception(
74
+ "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
75
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
76
+ )
77
+
78
+ self._config = load_config_hf(pretrained)
79
+
80
+ def _create_model(
81
+ self,
82
+ pretrained: str,
83
+ dtype: Optional[Union[str, torch.dtype]] = "float16",
84
+ # no `parallelize=True` options
85
+ # no PEFT and quantization options
86
+ # Mamba does not support arbitrary HF from_pretrained() args
87
+ **kwargs,
88
+ ) -> None:
89
+ try:
90
+ from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel # noqa: F811
91
+ except ModuleNotFoundError:
92
+ raise Exception(
93
+ "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
94
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
95
+ )
96
+
97
+ self._model = MambaLMHeadModel.from_pretrained(
98
+ pretrained,
99
+ device=self._device,
100
+ dtype=torch.float16
101
+ if dtype == "auto"
102
+ else lm_eval.models.utils.get_dtype(dtype),
103
+ )
104
+
105
+ def _model_generate(self, context, max_length, stop, **generation_kwargs):
106
+ for key in ("do_sample", "attention_mask"):
107
+ if key in generation_kwargs:
108
+ generation_kwargs.pop(key)
109
+
110
+ # mamba's custom GenerationMixin currently does not support
111
+ # passing stopping criteria.
112
+ # for the time being, we simply generate to max length,
113
+ # then truncate (equivalent result)
114
+ # -- this should be revisited to speed up generation
115
+ # stopping_criteria = stop_sequences_criteria(
116
+ # self.tokenizer, stop, 1, context.shape[0]
117
+ # )
118
+
119
+ return self.model.generate(
120
+ input_ids=context,
121
+ max_length=max_length,
122
+ # stopping_criteria=stopping_criteria,
123
+ # pad_token_id=self.tokenizer.pad_token_id,
124
+ # use_cache=True,
125
+ **generation_kwargs,
126
+ )
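
Because `MambaLMWrapper` registers itself under the `mamba_ssm` name, it can be resolved and instantiated like any other harness model. A hedged usage sketch, assuming `mamba_ssm` is installed and that `lm_eval.api.registry` exposes a `get_model()` lookup for registered model classes:

from lm_eval.api.registry import get_model

lm_cls = get_model("mamba_ssm")  # -> MambaLMWrapper, via @register_model
lm = lm_cls(
    pretrained="state-spaces/mamba-130m",
    tokenizer="EleutherAI/gpt-neox-20b",  # the default shown in __init__ above
    batch_size=8,
)
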
lm-evaluation-harness/lm_eval/models/nemo_lm.py ADDED
@@ -0,0 +1,537 @@
1
+ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import pathlib
17
+ from copy import deepcopy
18
+ from typing import List, Literal
19
+
20
+ import filelock
21
+ import numpy as np
22
+ import torch
23
+ from tqdm import tqdm
24
+
25
+ from lm_eval.api.instance import Instance
26
+ from lm_eval.api.model import LM
27
+ from lm_eval.api.registry import register_model
28
+ from lm_eval.models.utils import Collator
29
+ from lm_eval.utils import (
30
+ eval_logger,
31
+ get_rolling_token_windows,
32
+ make_disjoint_window,
33
+ simple_parse_args_string,
34
+ )
35
+
36
+
37
+ def _patch_pretrained_cfg(
38
+ pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size
39
+ ):
40
+ try:
41
+ import omegaconf
42
+ except ModuleNotFoundError:
43
+ raise Exception(
44
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
45
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
46
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
47
+ )
48
+
49
+ omegaconf.OmegaConf.set_struct(pretrained_cfg, True)
50
+ with omegaconf.open_dict(pretrained_cfg):
51
+ attributes_to_update = {
52
+ "sequence_parallel": False,
53
+ "activations_checkpoint_granularity": None,
54
+ "activations_checkpoint_method": None,
55
+ "precision": trainer.precision,
56
+ "global_batch_size": None,
57
+ "tensor_model_parallel_size": tensor_model_parallel_size,
58
+ "pipeline_model_parallel_size": pipeline_model_parallel_size,
59
+ "apply_rope_fusion": False,
60
+ }
61
+ for name, value in attributes_to_update.items():
62
+ if hasattr(pretrained_cfg, name):
63
+ pretrained_cfg[name] = value
64
+ return pretrained_cfg
65
+
66
+
67
+ def _get_target_from_class(target_class) -> str:
68
+ return f"{target_class.__module__}.{target_class.__name__}"
69
+
70
+
71
+ def load_model(
72
+ model_path: str,
73
+ trainer,
74
+ tensor_model_parallel_size: int,
75
+ pipeline_model_parallel_size: int,
76
+ ) -> torch.nn.Module:
77
+ try:
78
+ from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import (
79
+ MegatronGPTModel,
80
+ )
81
+ from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
82
+ except ModuleNotFoundError:
83
+ raise Exception(
84
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
85
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
86
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
87
+ )
88
+ model_path = pathlib.Path(model_path)
89
+
90
+ save_restore_connector = NLPSaveRestoreConnector()
91
+ if model_path.is_dir():
92
+ save_restore_connector.model_extracted_dir = model_path.as_posix()
93
+ pretrained_cfg = save_restore_connector.restore_from(
94
+ None, model_path.as_posix(), return_config=True, trainer=trainer
95
+ )
96
+ if not hasattr(pretrained_cfg, "target"):
97
+ pretrained_cfg["target"] = _get_target_from_class(MegatronGPTModel)
98
+
99
+ pretrained_cfg = _patch_pretrained_cfg(
100
+ pretrained_cfg,
101
+ trainer,
102
+ tensor_model_parallel_size=tensor_model_parallel_size,
103
+ pipeline_model_parallel_size=pipeline_model_parallel_size,
104
+ )
105
+
106
+ model_to_load_path = model_path
107
+ override_config = pretrained_cfg
108
+
109
+ module_name, class_name = override_config.target.rsplit(".", 1)
110
+ model_class = getattr(importlib.import_module(module_name), class_name)
111
+
112
+ # monkeypatch _build_tokenizer method to be process-safe
113
+ tokenizer_lock = filelock.FileLock(f"/tmp/{model_path.name}.tokenizer.lock")
114
+
115
+ def _synced_build_tokenizer(self):
116
+ with tokenizer_lock:
117
+ self._original_build_tokenizer()
118
+
119
+ model_class._original_build_tokenizer = model_class._build_tokenizer
120
+ model_class._build_tokenizer = _synced_build_tokenizer
121
+
122
+ model = model_class.restore_from(
123
+ restore_path=model_to_load_path.as_posix(),
124
+ trainer=trainer,
125
+ override_config_path=override_config,
126
+ save_restore_connector=save_restore_connector,
127
+ map_location=f"cuda:{trainer.local_rank}",
128
+ )
129
+
130
+ model.freeze()
131
+ model.training = False
132
+ try:
133
+ # Have to turn off activations_checkpoint_method for inference
134
+ model.model.language_model.encoder.activations_checkpoint_method = None
135
+ except AttributeError:
136
+ pass
137
+ return model
138
+
139
+
140
+ def setup_distributed_environment(trainer):
141
+ try:
142
+ from nemo.utils.app_state import AppState
143
+ except ModuleNotFoundError:
144
+ raise Exception(
145
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
146
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
147
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
148
+ )
149
+
150
+ def dummy():
151
+ return
152
+
153
+ if trainer.strategy.launcher is not None:
154
+ trainer.strategy.launcher.launch(dummy, trainer=trainer)
155
+ trainer.strategy.setup_environment()
156
+
157
+ app_state = AppState()
158
+
159
+ return app_state
160
+
161
+
162
+ @register_model("nemo_lm")
163
+ class NeMoLM(LM):
164
+ def __init__(
165
+ self,
166
+ path: str,
167
+ max_length: int = 4096,
168
+ batch_size: int = 1,
169
+ max_gen_toks: int = 256,
170
+ devices: int = 1,
171
+ num_nodes: int = 1,
172
+ tensor_model_parallel_size: int = 1,
173
+ pipeline_model_parallel_size: int = 1,
174
+ precision: Literal[
175
+ "16-mixed",
176
+ "bf16-mixed",
177
+ "32-true",
178
+ "64-true",
179
+ 64,
180
+ 32,
181
+ 16,
182
+ "64",
183
+ "32",
184
+ "16",
185
+ "bf16",
186
+ ] = "bf16",
187
+ **kwargs,
188
+ ):
189
+ try:
190
+ from nemo.collections.nlp.modules.common.text_generation_utils import (
191
+ generate,
192
+ )
193
+ from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
194
+ from pytorch_lightning.trainer.trainer import Trainer
195
+
196
+ self.generate = generate
197
+ except ModuleNotFoundError:
198
+ raise Exception(
199
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
200
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
201
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
202
+ )
203
+
204
+ super().__init__()
205
+
206
+ if (
207
+ tensor_model_parallel_size == 1
208
+ and pipeline_model_parallel_size == 1
209
+ and devices > 1
210
+ ):
211
+ eval_logger.info(
212
+ f"The number of data replicas for evaluation is {devices}."
213
+ )
214
+ eval_logger.info(f"The total number of devices is {devices}.")
215
+ eval_logger.info(
216
+ "No tensor parallelism or pipeline parallelism is applied."
217
+ )
218
+
219
+ elif tensor_model_parallel_size * pipeline_model_parallel_size == devices:
220
+ eval_logger.info(
221
+ f"Setting tensor parallelism to {tensor_model_parallel_size} and pipeline parallelism to {pipeline_model_parallel_size}."
222
+ )
223
+ eval_logger.info(f"The total number of devices is {devices}.")
224
+ eval_logger.info("No data parallelism is applied.")
225
+
226
+ else:
227
+ raise ValueError(
228
+ "Please set the product of tensor_model_parallel_size and pipeline_model_parallel_size"
229
+ "equal to the specified number of devices."
230
+ )
231
+
232
+ if num_nodes > 1:
233
+ raise ValueError(
234
+ "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1."
235
+ )
236
+
237
+ trainer = Trainer(
238
+ strategy=NLPDDPStrategy(),
239
+ devices=devices,
240
+ accelerator="gpu",
241
+ num_nodes=num_nodes,
242
+ precision=precision,
243
+ logger=False,
244
+ enable_checkpointing=False,
245
+ use_distributed_sampler=False,
246
+ )
247
+ # Modify the following flags only for data replication
248
+ if (
249
+ tensor_model_parallel_size == 1
250
+ and pipeline_model_parallel_size == 1
251
+ and devices > 1
252
+ ):
253
+ self._device = torch.device(f"cuda:{trainer.global_rank}")
254
+ self._rank = trainer.global_rank
255
+ self._world_size = trainer.world_size
256
+ self.model = load_model(
257
+ path,
258
+ trainer,
259
+ tensor_model_parallel_size=tensor_model_parallel_size,
260
+ pipeline_model_parallel_size=pipeline_model_parallel_size,
261
+ ).cuda()
262
+ self.tokenizer = self.model.tokenizer
263
+ self.app_state = setup_distributed_environment(trainer)
264
+
265
+ self._max_length = max_length
266
+ self._batch_size = int(batch_size)
267
+ self._max_gen_toks = max_gen_toks
268
+
269
+ @classmethod
270
+ def create_from_arg_string(cls, arg_string, additional_config=None):
271
+ args = simple_parse_args_string(arg_string)
272
+ if additional_config:
273
+ args["batch_size"] = additional_config.get("batch_size", 1)
274
+
275
+ return cls(**args)
276
+
277
+ @property
278
+ def eot_token_id(self):
279
+ try:
280
+ return self.tokenizer.eos_id
281
+ except AttributeError:
282
+ return None
283
+
284
+ @property
285
+ def max_length(self):
286
+ return self._max_length
287
+
288
+ @property
289
+ def max_gen_toks(self):
290
+ return self._max_gen_toks
291
+
292
+ @property
293
+ def batch_size(self):
294
+ return self._batch_size
295
+
296
+ @property
297
+ def device(self):
298
+ return self._device
299
+
300
+ @property
301
+ def rank(self):
302
+ return self._rank
303
+
304
+ @property
305
+ def world_size(self):
306
+ return self._world_size
307
+
308
+ @property
309
+ def accelerator(self):
310
+ return self._Accelerator(self.world_size)
311
+
312
+ class _Accelerator:
313
+ def __init__(self, world_size):
314
+ self.world_size = world_size
315
+
316
+ def wait_for_everyone(self):
317
+ torch.distributed.barrier()
318
+
319
+ def gather(self, local_tensor):
320
+ gathered_tensors = [
321
+ torch.zeros(1, dtype=local_tensor.dtype).cuda()
322
+ for _ in range(self.world_size)
323
+ ]
324
+ torch.distributed.all_gather(gathered_tensors, local_tensor)
325
+ return torch.cat(gathered_tensors)
326
+
327
+ def tok_encode(self, string: str):
328
+ return self.tokenizer.text_to_ids(string)
329
+
330
+ def tok_decode(self, tokens):
331
+ return self.tokenizer.ids_to_text(tokens)
332
+
333
+ def _encode_pair(self, context, continuation):
334
+ n_spaces = len(context) - len(context.rstrip())
335
+ if n_spaces > 0:
336
+ continuation = context[-n_spaces:] + continuation
337
+ context = context[:-n_spaces]
338
+ whole_enc = self.tok_encode(context + continuation)
339
+ context_enc = self.tok_encode(context)
340
+ context_enc_len = len(context_enc)
341
+ continuation_enc = whole_enc[context_enc_len:]
342
+ return context_enc, continuation_enc
343
+
344
+ def loglikelihood(self, requests):
345
+ new_reqs = []
346
+ for context, continuation in [req.args for req in requests]:
347
+ if context == "":
348
+ # end of text as context
349
+ context_enc, continuation_enc = (
350
+ [self.eot_token_id],
351
+ self.tok_encode(continuation),
352
+ )
353
+ else:
354
+ context_enc, continuation_enc = self._encode_pair(context, continuation)
355
+
356
+ new_reqs.append(((context, continuation), context_enc, continuation_enc))
357
+
358
+ return self._loglikelihood_tokens(new_reqs)
359
+
360
+ def loglikelihood_rolling(
361
+ self, requests: List[Instance], disable_tqdm: bool = False
362
+ ) -> List[float]:
363
+ loglikelihoods = []
364
+
365
+ for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
366
+ rolling_token_windows = list(
367
+ map(
368
+ make_disjoint_window,
369
+ get_rolling_token_windows(
370
+ token_list=self.tok_encode(string),
371
+ prefix_token=self.eot_token_id,
372
+ max_seq_len=self.max_length - 1,
373
+ context_len=1,
374
+ ),
375
+ )
376
+ )
377
+
378
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
379
+
380
+ string_nll = self._loglikelihood_tokens(
381
+ rolling_token_windows,
382
+ )
383
+
384
+ # discard is_greedy
385
+ string_nll = [x[0] for x in string_nll]
386
+
387
+ string_nll = sum(string_nll)
388
+ loglikelihoods.append(string_nll)
389
+ return loglikelihoods
390
+
391
+ def _loglikelihood_tokens(self, requests, disable_tqdm=False):
392
+ res = []
393
+
394
+ def _collate(x):
395
+ toks = x[1] + x[2]
396
+ return -len(toks), tuple(toks)
397
+
398
+ re_ord = Collator(requests, sort_fn=_collate)
399
+ chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None)
400
+ pbar = tqdm(
401
+ total=len(requests),
402
+ disable=(disable_tqdm or (self.rank != 0)),
403
+ desc="Running loglikelihood requests",
404
+ )
405
+ for chunk in chunks:
406
+ inps = []
407
+ ctxlens = []
408
+ contlens = []
409
+
410
+ for _, context_enc, continuation_enc in chunk:
411
+ # Leave one token for generation. Tokens_to_generate = 0 breaks NeMo.
412
+ inp = (context_enc + continuation_enc)[-(self.max_length - 1) :]
413
+
414
+ ctxlen = len(context_enc) - max(
415
+ 0, len(context_enc) + len(continuation_enc) - (self.max_length - 1)
416
+ )
417
+ ctxlens.append(ctxlen)
418
+ contlens.append(len(continuation_enc))
419
+
420
+ inps.append(self.tok_decode(inp))
421
+
422
+ output = self.generate(
423
+ self.model,
424
+ inputs=inps,
425
+ tokens_to_generate=1,
426
+ min_tokens_to_generate=1,
427
+ compute_logprob=True,
428
+ all_probs=True,
429
+ )
430
+
431
+ batch_token_ids = np.asarray(output["token_ids"])[:, :-1]
432
+ batch_logprobs = output["logprob"][:, :-1]
433
+ batch_full_logprob = output["full_logprob"][:, :-1, :]
434
+
435
+ # Compute greedy tokens for entire batch rather than calling it with proper ctxlen for each sample.
436
+ # Additional tokens for each sample will be trimmed later.
437
+ min_ctxlen = min(ctxlens)
438
+
439
+ # Use min_ctxlen-1 instead of min_ctxlen since full_logprobs are not returned for the first token.
440
+ batch_greedy_tokens = (
441
+ torch.argmax(batch_full_logprob[:, min_ctxlen - 1 :, :], -1)
442
+ .cpu()
443
+ .numpy()
444
+ )
445
+
446
+ for token_ids, greedy_tokens, logprobs, ctxlen, contlen, (
447
+ cache_key,
448
+ _,
449
+ _,
450
+ ) in zip(
451
+ batch_token_ids,
452
+ batch_greedy_tokens,
453
+ batch_logprobs,
454
+ ctxlens,
455
+ contlens,
456
+ chunk,
457
+ ):
458
+ # Trim at contlen since shorter contexts in a batch will have more than one token generated.
459
+ # Use ctxlen-1 instead of ctxlen same as for full_logprob in batch_greedy_tokens calculation
460
+ logprobs = (logprobs[ctxlen - 1 :])[:contlen]
461
+ logprob = sum(logprobs).tolist()
462
+
463
+ continuation_tokens = (token_ids[ctxlen:])[:contlen]
464
+ len_diff = ctxlen - min_ctxlen
465
+ is_greedy = continuation_tokens == (greedy_tokens[len_diff:])[:contlen]
466
+ if not isinstance(is_greedy, bool):
467
+ is_greedy = is_greedy.all()
468
+ answer = (logprob, is_greedy)
469
+
470
+ if cache_key is not None:
471
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
472
+
473
+ res.append(answer)
474
+ pbar.update(1)
475
+
476
+ pbar.close()
477
+
478
+ return re_ord.get_original(res)
479
+
480
+ def generate_until(self, requests):
481
+ if not requests:
482
+ return []
483
+ res = []
484
+
485
+ def get_until(req_args):
486
+ until = req_args.get("until", [])
487
+ until = deepcopy(until) # avoid modifying req_args, which is used for the cache_key
488
+ if self.tokenizer.ids_to_tokens([self.eot_token_id])[0] not in until:
489
+ until.append(self.tokenizer.ids_to_tokens([self.eot_token_id])[0])
490
+ return until
491
+
492
+ def _collate(x):
493
+ toks = self.tok_encode(x[0])
494
+ return len(toks), x[0]
495
+
496
+ re_ords = Collator(
497
+ [reg.args for reg in requests], sort_fn=_collate, group_by="gen_kwargs"
498
+ )
499
+ chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
500
+ for chunk in chunks:
501
+ contexts, all_gen_kwargs = zip(*chunk)
502
+ # we assume all gen kwargs in the batch are the same
503
+ # this is safe to assume because the `grouper` object ensures it.
504
+ req_args = all_gen_kwargs[0]
505
+ # unpack our keyword arguments.
506
+ until = get_until(req_args)
507
+ max_gen_toks = req_args.get("max_gen_toks", self.max_gen_toks)
508
+
509
+ remaining_length = self.max_length - max_gen_toks
510
+ contexts = []
511
+ for context, _ in chunk:
512
+ encoded_context = self.tok_encode(context)
513
+ encoded_context = encoded_context[-remaining_length:]
514
+ contexts.append(self.tok_decode(encoded_context))
515
+
516
+ output = self.generate(
517
+ self.model,
518
+ inputs=contexts,
519
+ tokens_to_generate=max_gen_toks,
520
+ end_strings=until,
521
+ greedy=True,
522
+ )
523
+
524
+ answers = output["sentences"]
525
+
526
+ continuations = []
527
+ for context, answer in zip(contexts, answers):
528
+ continuations.append(answer[len(context) :])
529
+
530
+ for term in until:
531
+ continuations = [answer.split(term)[0] for answer in continuations]
532
+
533
+ for request, answer in zip(chunk, continuations):
534
+ self.cache_hook.add_partial("greedy_until", request, answer)
535
+ res.append(answer)
536
+
537
+ return re_ords.get_original(res)
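
`NeMoLM._encode_pair` moves trailing whitespace from the context onto the continuation before tokenizing, because BPE-style tokenizers usually fuse a leading space into the following word, and tokenizing the two halves separately at that boundary would change the token ids. A toy sketch of that handling, with a whitespace tokenizer standing in for `tokenizer.text_to_ids`:

def encode_pair(context, continuation, tok_encode):
    # move trailing spaces from context to continuation, then tokenize the
    # whole string once and split it at the context's token length
    n_spaces = len(context) - len(context.rstrip())
    if n_spaces > 0:
        continuation = context[-n_spaces:] + continuation
        context = context[:-n_spaces]
    whole_enc = tok_encode(context + continuation)
    context_enc = tok_encode(context)
    return context_enc, whole_enc[len(context_enc):]

toy_tokenizer = lambda s: s.split()  # hypothetical stand-in for the real tokenizer
print(encode_pair("Question: 2+2 = ", "4", toy_tokenizer))
# (['Question:', '2+2', '='], ['4'])
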
lm-evaluation-harness/lm_eval/models/neuralmagic.py ADDED
@@ -0,0 +1,426 @@
1
+ import copy
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import numpy
5
+ import transformers
6
+ from tqdm import tqdm
7
+
8
+ import lm_eval.models.utils
9
+ from lm_eval import utils
10
+ from lm_eval.api.instance import Instance
11
+ from lm_eval.api.model import LM
12
+ from lm_eval.api.registry import register_model
13
+ from lm_eval.models.huggingface import HFLM
14
+
15
+
16
+ eval_logger = utils.eval_logger
17
+
18
+
19
+ @register_model("sparseml")
20
+ class SparseMLLM(HFLM):
21
+ """
22
+ SparseML is an open-source model optimization toolkit that enables you to create
23
+ inference-optimized sparse models using pruning, quantization, and distillation
24
+ algorithms. Models optimized with SparseML can then be exported to the ONNX format and
25
+ deployed with DeepSparse for GPU-class performance on CPU hardware.
26
+
27
+ This class is a wrapper around the HuggingFace LM class to enable SparseML
28
+ integration with the lm-evaluation-harness.
29
+ """
30
+
31
+ def _create_model(
32
+ self,
33
+ pretrained: str,
34
+ revision: Optional[str] = "main",
35
+ dtype: Optional[str] = "auto",
36
+ trust_remote_code: Optional[bool] = False,
37
+ **kwargs,
38
+ ) -> None:
39
+ try:
40
+ from sparseml.transformers import SparseAutoModelForCausalLM
41
+ except ModuleNotFoundError:
42
+ raise Exception(
43
+ "Package `sparseml` is not installed. "
44
+ "Please install it via `pip install sparseml[transformers]`"
45
+ )
46
+
47
+ model_kwargs = kwargs if kwargs else {}
48
+
49
+ if "device_map" not in model_kwargs:
50
+ # set a device_map to initialize model on the right GPU.
51
+ # this is needed because it seems that the default behavior
52
+ # for quantized models now seems to be device_map="auto"
53
+ # which breaks data-parallel mode.
54
+ if hasattr(self, "accelerator"):
55
+ model_kwargs.update(
56
+ {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}}
57
+ )
58
+ else:
59
+ model_kwargs.update({"device_map": {"": str(self.device)}})
60
+
61
+ relevant_kwarg_names = [
62
+ "offload_folder",
63
+ "device_map",
64
+ ]
65
+ relevant_kwargs = {
66
+ k: v for k, v in model_kwargs.items() if k in relevant_kwarg_names
67
+ }
68
+
69
+ # Log the difference between model_kwargs and relevant_kwargs so we can see
70
+ # what is being ignored
71
+ ignored_kwargs = {}
72
+ for k, v in model_kwargs.items():
73
+ if k not in relevant_kwargs.keys():
74
+ ignored_kwargs[k] = v
75
+ eval_logger.warning(
76
+ f"The sparseml integration is ignoring the following kwargs that are specified: {ignored_kwargs}"
77
+ )
78
+
79
+ model = SparseAutoModelForCausalLM.from_pretrained(
80
+ pretrained,
81
+ revision=revision,
82
+ torch_dtype=lm_eval.models.utils.get_dtype(dtype),
83
+ trust_remote_code=trust_remote_code,
84
+ **relevant_kwargs,
85
+ )
86
+ self._model = model
87
+
88
+ def _get_config(self, pretrained: str, **kwargs) -> None:
89
+ try:
90
+ from sparseml.transformers import SparseAutoConfig
91
+ except ModuleNotFoundError:
92
+ raise Exception(
93
+ "Package `sparseml` is not installed. "
94
+ "Please install it via `pip install sparseml[transformers]`"
95
+ )
96
+
97
+ self._config = SparseAutoConfig.from_pretrained(
98
+ pretrained_model_name_or_path=pretrained, **kwargs
99
+ )
100
+
101
+ def _create_tokenizer(
102
+ self,
103
+ pretrained: Union[str, transformers.PreTrainedModel],
104
+ tokenizer: Optional[
105
+ Union[
106
+ str,
107
+ transformers.PreTrainedTokenizer,
108
+ transformers.PreTrainedTokenizerFast,
109
+ ]
110
+ ],
111
+ **kwargs,
112
+ ) -> None:
113
+ try:
114
+ from sparseml.transformers import SparseAutoTokenizer
115
+ except ModuleNotFoundError:
116
+ raise Exception(
117
+ "Package `sparseml` is not installed. "
118
+ "Please install it via `pip install sparseml[transformers]`"
119
+ )
120
+
121
+ if tokenizer:
122
+ if isinstance(tokenizer, str):
123
+ self.tokenizer = SparseAutoTokenizer.from_pretrained(
124
+ tokenizer,
125
+ **kwargs,
126
+ )
127
+ else:
128
+ assert isinstance(
129
+ tokenizer, transformers.PreTrainedTokenizer
130
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
131
+ self.tokenizer = tokenizer
132
+ else:
133
+ # Get tokenizer based on 'pretrained'
134
+ if isinstance(pretrained, str):
135
+ model_name = pretrained
136
+ else:
137
+ # get the HF hub name via accessor on model
138
+ model_name = self.model.name_or_path
139
+ self.tokenizer = SparseAutoTokenizer.from_pretrained(
140
+ model_name,
141
+ **kwargs,
142
+ )
143
+ return None
144
+
145
+
146
+ @register_model("deepsparse")
147
+ class DeepSparseLM(LM):
148
+ """
149
+ Wrapper around DeepSparse, a sparsity-aware deep learning
150
+ inference runtime for CPUs, to make it compatible with the
151
+ lm-evaluation-harness.
152
+ """
153
+
154
+ _DEFAULT_MAX_LENGTH = 2048
155
+
156
+ def __init__(
157
+ self,
158
+ pretrained: str,
159
+ tokenizer: Optional[
160
+ Union[
161
+ str,
162
+ transformers.PreTrainedTokenizer,
163
+ transformers.PreTrainedTokenizerFast,
164
+ ]
165
+ ] = None,
166
+ batch_size: Optional[Union[int, str]] = 1,
167
+ max_gen_toks: Optional[int] = 256,
168
+ max_length: Optional[int] = None,
169
+ ):
170
+ super().__init__()
171
+
172
+ try:
173
+ import deepsparse
174
+ except ModuleNotFoundError:
175
+ raise Exception(
176
+ "Package `deepsparse` is not installed. "
177
+ "Please install it via `pip install deepsparse[transformers]`"
178
+ )
179
+
180
+ if isinstance(batch_size, str) and not batch_size.isdigit():
181
+ eval_logger.warning(
182
+ f"batch_size={batch_size} is not valid for deepsparse because it is not an integer. "
183
+ "Ignoring and using the default of 1."
184
+ )
185
+ batch_size = 1
186
+
187
+ self.batch_size = int(batch_size)
188
+ self._max_length = max_length if max_length else self._DEFAULT_MAX_LENGTH
189
+ self._max_gen_toks = max_gen_toks
190
+ self.batch_sizes = {}
191
+
192
+ # Initialize new model and tokenizer instances
193
+ self.model = deepsparse.TextGeneration(
194
+ model_path=pretrained,
195
+ sequence_length=self._max_length,
196
+ batch_size=batch_size,
197
+ )
198
+ self.tokenizer = tokenizer if tokenizer else self.model.tokenizer
199
+ self.config = self.model.config
200
+
201
+ def tok_encode(self, string: str) -> List[int]:
202
+ return self.tokenizer.encode(string)
203
+
204
+ def tok_decode(self, tokens: List[int]) -> str:
205
+ return self.tokenizer.decode(tokens)
206
+
207
+ @property
208
+ def eot_token_id(self):
209
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
210
+ return self.tokenizer.eos_token_id
211
+
212
+ @property
213
+ def prefix_token_id(self):
214
+ # it is used as prefix for loglikelihood
215
+ if self.tokenizer.bos_token_id is not None:
216
+ return self.tokenizer.bos_token_id
217
+ return self.tokenizer.eos_token_id
218
+
219
+ @property
220
+ def max_length(self) -> int:
221
+ return self._max_length
222
+
223
+ @property
224
+ def max_gen_toks(self) -> int:
225
+ return self._max_gen_toks
226
+
227
+ def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
228
+ """
229
+ Copied directly from
230
+ https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py
231
+ """
232
+ new_reqs = []
233
+ for context, continuation in [req.args for req in requests]:
234
+ if context == "":
235
+ raise NotImplementedError(
236
+ "Implementing empty context is not supported yet"
237
+ )
238
+ context_enc, continuation_enc = self._encode_pair(context, continuation)
239
+
240
+ new_reqs.append(((context, continuation), context_enc, continuation_enc))
241
+
242
+ return self._loglikelihood_tokens(new_reqs)
243
+
244
+ def _loglikelihood_tokens(
245
+ self,
246
+ requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
247
+ disable_tqdm: bool = False,
248
+ ) -> List[Tuple[float, bool]]:
249
+ """
250
+ The function to compute the loglikelihood of the continuation
251
+ tokens given the context tokens.
252
+
253
+ This function is an adapted version of the original function from
254
+ https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py
255
+ """
256
+ res = []
257
+
258
+ def _collate(x):
259
+ """Defines the key for the sorted method"""
260
+ toks = x[1] + x[2]
261
+ return -len(toks), tuple(toks)
262
+
263
+ re_ord = utils.Reorderer(requests, _collate)
264
+
265
+ for chunk in tqdm(
266
+ list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
267
+ disable=disable_tqdm,
268
+ ):
269
+ batch_inp = []
270
+ batch_cache_key = []
271
+ batch_continuation_enc = []
272
+ # len(chunk) is the batch_size
273
+ for cache_key, context_enc, continuation_enc in chunk:
274
+ # how this all works (illustrated on a causal decoder-only setup):
275
+ # CTX CONT
276
+ # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
277
+ # model \ \
278
+ # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
279
+ # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice # noqa: E501
280
+
281
+ inp = (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
282
+
283
+ batch_inp.append(self.tokenizer.decode(inp))
284
+ batch_cache_key.append(cache_key)
285
+ batch_continuation_enc.append(continuation_enc)
286
+
287
+ response = self.model(
288
+ prompt=batch_inp,
289
+ max_new_tokens=0,
290
+ output_scores=True,
291
+ include_prompt_logits=True,
292
+ )
293
+
294
+ for resp, continuation_enc, cache_key in zip(
295
+ response.generations, batch_continuation_enc, batch_cache_key
296
+ ):
297
+ # (seq_len, vocab_size)
298
+ multi_scores = resp.score
299
+
300
+ from deepsparse.utils.data import numpy_log_softmax
301
+
302
+ # (seq_len, vocab_size) but with softmax applied
303
+ multi_logits = numpy_log_softmax(multi_scores, axis=1)
304
+ # toss out the context half of the sequence
305
+ # (cont_len, vocab_size)
306
+ continuation_multi_logits = multi_logits[-len(continuation_enc) :]
307
+
308
+ # pick out the logits for the continuation tokens
309
+ # (cont_len,)
310
+ continuation_logits = continuation_multi_logits[
311
+ numpy.arange(len(continuation_enc)), continuation_enc
312
+ ]
313
+ # check if the tokens generated greedly are the same
314
+ # as the expected continuation
315
+ greedy_tokens = continuation_multi_logits.argmax(axis=1)
316
+ max_equal = greedy_tokens.tolist() == continuation_enc
317
+
318
+ # Answer: (log prob, is-exact-match)
319
+ answer = (float(continuation_logits.sum()), bool(max_equal))
320
+
321
+ res.append(answer)
322
+
323
+ if cache_key is not None:
324
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
325
+
326
+ return re_ord.get_original(res)
327
+
328
+ def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]:
329
+ raise NotImplementedError(
330
+ "The method not required by any of our current task integrations so far"
331
+ )
332
+
333
+ def generate_until(self, requests: List[Instance]) -> List[str]:
334
+ """
335
+ The function to generate a certain number of new tokens
336
+ given a context.
337
+
338
+ This function is an adapted version of the original function from
339
+ https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/openai_completions.py
340
+ """
341
+ if not requests:
342
+ return []
343
+ res = []
344
+ requests = [req.args for req in requests]
345
+
346
+ def _collate(x):
347
+ toks = self.tok_encode(x[0])
348
+ return len(toks), x[0]
349
+
350
+ re_ord = utils.Reorderer(requests, _collate)
351
+
352
+ def sameuntil_chunks(xs, size):
353
+ ret = []
354
+ lastuntil = xs[0][1]
355
+ for x in xs:
356
+ if len(ret) >= size or x[1] != lastuntil:
357
+ yield ret, lastuntil
358
+ ret = []
359
+ lastuntil = x[1]
360
+ ret.append(x)
361
+
362
+ if ret:
363
+ yield ret, lastuntil
364
+
365
+ pbar = tqdm(total=len(requests))
366
+ for chunk, request_args in tqdm(
367
+ list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size))
368
+ ):
369
+ inps = []
370
+
371
+ # make a deepcopy since we are changing arguments
372
+ request_args = copy.deepcopy(request_args)
373
+
374
+ self._max_gen_toks = request_args.pop("max_gen_toks", self.max_gen_toks)
375
+
376
+ for context, _ in chunk:
377
+ # add context (prompts) to the list
378
+ inps.append(context)
379
+
380
+ until = request_args.pop("until", ["<|endoftext|>"])
381
+ request_args.pop("do_sample", None)
382
+ request_args["temperature"] = request_args.get("temperature", 0)
383
+
384
+ # run inference (generate max_gen_toks tokens)
385
+ out = self.model(
386
+ sequences=inps,
387
+ max_new_tokens=self.max_gen_toks - 1,
388
+ stop=until,
389
+ **request_args,
390
+ )
391
+
392
+ for resp, (context, args_) in zip(out.generations, chunk):
393
+ text = resp.text
394
+ until_ = until
395
+ # split the text at the first occurrence of any of the until tokens
396
+ for term in until_:
397
+ if len(term) > 0:
398
+ text = text.split(term)[0]
399
+
400
+ res.append(text)
401
+
402
+ self.cache_hook.add_partial(
403
+ "generate_until", (context, {"until": until_}), text
404
+ )
405
+ pbar.update(1)
406
+
407
+ pbar.close()
408
+
409
+ return re_ord.get_original(res)
410
+
411
+ def _encode_pair(
412
+ self, context: str, continuation: str
413
+ ) -> Tuple[List[int], List[int]]:
414
+ """
415
+ Copied directly from
416
+ https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py
417
+ """
418
+ n_spaces = len(context) - len(context.rstrip())
419
+ if n_spaces > 0:
420
+ continuation = context[-n_spaces:] + continuation
421
+ context = context[:-n_spaces]
422
+ whole_enc = self.tok_encode(context + continuation)
423
+ context_enc = self.tok_encode(context)
424
+ context_enc_len = len(context_enc)
425
+ continuation_enc = whole_enc[context_enc_len:]
426
+ return context_enc, continuation_enc
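
`DeepSparseLM._loglikelihood_tokens` asks the pipeline for prompt logits (`output_scores=True`, `include_prompt_logits=True`), log-softmaxes them, keeps only the rows that predict the continuation, and sums the continuation token log-probs while checking greedy agreement. A numpy-only sketch of that scoring path, with random scores standing in for the pipeline output:

import numpy as np

def score_continuation(scores, continuation_enc):
    # scores: (seq_len, vocab_size) logits covering the prompt positions
    logprobs = scores - np.log(np.exp(scores).sum(axis=1, keepdims=True))
    cont_logprobs = logprobs[-len(continuation_enc):]          # rows predicting cont
    picked = cont_logprobs[np.arange(len(continuation_enc)), continuation_enc]
    is_greedy = cont_logprobs.argmax(axis=1).tolist() == list(continuation_enc)
    return float(picked.sum()), bool(is_greedy)

print(score_continuation(np.random.randn(6, 32), [3, 7]))  # (log prob, is-exact-match)
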