applied-ai-018 committed
Commit f238c4c · verified · 1 Parent(s): 14482ce

Add files using upload-large-folder tool

Files changed (50) — this view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
  1. lm-evaluation/build/lib/lm_eval/api/__init__.py +0 -0
  2. lm-evaluation/build/lib/lm_eval/api/filter.py +56 -0
  3. lm-evaluation/build/lib/lm_eval/api/instance.py +38 -0
  4. lm-evaluation/build/lib/lm_eval/api/metrics.py +509 -0
  5. lm-evaluation/build/lib/lm_eval/api/model.py +346 -0
  6. lm-evaluation/build/lib/lm_eval/api/registry.py +172 -0
  7. lm-evaluation/build/lib/lm_eval/api/samplers.py +114 -0
  8. lm-evaluation/build/lib/lm_eval/api/task.py +1498 -0
  9. lm-evaluation/build/lib/lm_eval/decontamination/decontaminate.py +166 -0
  10. lm-evaluation/build/lib/lm_eval/decontamination/janitor.py +328 -0
  11. lm-evaluation/build/lib/lm_eval/filters/__init__.py +48 -0
  12. lm-evaluation/build/lib/lm_eval/filters/decontamination.py +24 -0
  13. lm-evaluation/build/lib/lm_eval/filters/extraction.py +183 -0
  14. lm-evaluation/build/lib/lm_eval/filters/selection.py +52 -0
  15. lm-evaluation/build/lib/lm_eval/filters/transformation.py +52 -0
  16. lm-evaluation/build/lib/lm_eval/models/__init__.py +26 -0
  17. lm-evaluation/build/lib/lm_eval/models/anthropic_llms.py +360 -0
  18. lm-evaluation/build/lib/lm_eval/models/dummy.py +41 -0
  19. lm-evaluation/build/lib/lm_eval/models/gguf.py +130 -0
  20. lm-evaluation/build/lib/lm_eval/models/huggingface.py +1243 -0
  21. lm-evaluation/build/lib/lm_eval/models/mamba_lm.py +126 -0
  22. lm-evaluation/build/lib/lm_eval/models/nemo_lm.py +537 -0
  23. lm-evaluation/build/lib/lm_eval/models/neuron_optimum.py +736 -0
  24. lm-evaluation/build/lib/lm_eval/models/openai_completions.py +481 -0
  25. lm-evaluation/build/lib/lm_eval/models/optimum_lm.py +69 -0
  26. lm-evaluation/build/lib/lm_eval/models/textsynth.py +171 -0
  27. lm-evaluation/build/lib/lm_eval/models/utils.py +615 -0
  28. lm-evaluation/build/lib/lm_eval/models/vllm_causallms.py +487 -0
  29. lm-evaluation/build/lib/lm_eval/prompts/__init__.py +126 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/arc/README.md +54 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/arc/arc_challenge.yaml +3 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/arc/arc_easy.yaml +23 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/eus_proficiency/README.md +48 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/eus_proficiency/eus_proficiency.yaml +16 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/eus_reading/README.md +48 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/eus_reading/eus_reading.yaml +16 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/eus_reading/utils.py +41 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/haerae/README.md +49 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/haerae/_default_haerae_yaml +17 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_gk.yaml +3 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_hi.yaml +3 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_lw.yaml +3 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_rw.yaml +3 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_sn.yaml +3 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/README.md +56 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml +20 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml +21 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/logiqa/README.md +52 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/logiqa/logiqa.yaml +23 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/logiqa/utils_logiqa.py +24 -0
lm-evaluation/build/lib/lm_eval/api/__init__.py ADDED
(empty file)
lm-evaluation/build/lib/lm_eval/api/filter.py ADDED
@@ -0,0 +1,56 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, Iterable, List, Union

from lm_eval.api.instance import Instance


class Filter(ABC):
    """
    Filter classes operate on a per-task level.
    They take all model outputs (`instance.resps` for all `task.instances`)
    across all instances of a task, and perform operations.
    In a single run, one can configure any number of separate filters or lists of filters.

    """

    def __init__(self, **kwargs) -> None:
        """
        Can define custom behavior here, if an individual instantiation of a Filter class should have state.
        """

    @abstractmethod
    def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable:
        """
        Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects.
        Should return the list of (filtered) response lists *in the same order as they were input*, e.g.
        if pass in [<inst.resps for instance 0>, <inst.resps for instance 1>] should return
        [<filtered resps for instance 0>, <filtered resps for instance 1>]
        """
        return resps


@dataclass
class FilterEnsemble:
    """
    FilterEnsemble creates a pipeline applying multiple filters.
    Its intended usage is to stack multiple post-processing steps in order.
    `task.apply_filters` should use a list of FilterEnsemble classes that it stores, to apply each
    pipeline separately.
    """

    name: str
    filters: List[Callable[[], Filter]]

    def apply(self, instances: List[Instance]) -> None:
        resps, docs = zip(*((inst.resps, inst.doc) for inst in instances))
        resps, docs = list(resps), list(docs)

        for f in self.filters:
            # apply filters in sequence
            resps = f().apply(resps, docs)

        # add the end results after filtering to filtered_requests of their respective source instances.
        # has key `self.name`: each FilterEnsemble applied in a given run should use a different name.
        for inst, resp in zip(instances, resps):
            inst.filtered_resps[self.name] = resp
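
A minimal usage sketch (not part of this commit; `LowercaseFilter` and the sample doc are hypothetical) showing how a custom Filter chains into a FilterEnsemble:

from lm_eval.api.filter import Filter, FilterEnsemble
from lm_eval.api.instance import Instance


class LowercaseFilter(Filter):
    def apply(self, resps, docs):
        # one list of responses per instance; order must be preserved
        return [[r.lower() for r in instance_resps] for instance_resps in resps]


ensemble = FilterEnsemble(name="lowercase", filters=[LowercaseFilter])

inst = Instance(
    request_type="generate_until",
    doc={"question": "capital of France?"},
    arguments=("Q: capital of France?\nA:", {"until": ["\n"]}),
    idx=0,
)
inst.resps = [" Paris"]
ensemble.apply([inst])
print(inst.filtered_resps["lowercase"])  # [' paris']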
lm-evaluation/build/lib/lm_eval/api/instance.py ADDED
@@ -0,0 +1,38 @@
from dataclasses import dataclass, field
from typing import Literal, Optional, Tuple


OutputType = Literal[
    "loglikelihood", "loglikelihood_rolling", "generate_until", "multiple_choice"
]


@dataclass
class Instance:
    request_type: OutputType
    doc: dict
    arguments: tuple
    idx: int
    metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field(
        default_factory=lambda: (None, None, None)
    )
    resps: list = field(default_factory=list)
    filtered_resps: dict = field(default_factory=dict)

    # initialized after init
    task_name: Optional[str] = None
    doc_id: Optional[int] = None
    repeats: Optional[int] = None

    def __post_init__(self) -> None:
        # unpack metadata field
        self.task_name, self.doc_id, self.repeats = self.metadata

    @property
    def args(self):
        """
        Returns (string,) where `string` is the string to calculate loglikelihood over
        """
        return (
            self.arguments if isinstance(self.arguments, tuple) else (self.arguments,)
        )
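
A short sketch (not part of the commit; the task name and doc are made up) of building a loglikelihood request and reading the unpacked metadata:

from lm_eval.api.instance import Instance

inst = Instance(
    request_type="loglikelihood",
    doc={"text": "The quick brown fox"},
    arguments=("The quick brown", " fox"),
    idx=0,
    metadata=("my_task", 17, 1),  # (task_name, doc_id, repeats)
)
print(inst.task_name, inst.doc_id, inst.repeats)  # my_task 17 1
print(inst.args)  # ('The quick brown', ' fox')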
lm-evaluation/build/lib/lm_eval/api/metrics.py ADDED
@@ -0,0 +1,509 @@
import logging
import math
import random
from collections.abc import Iterable
from typing import List

import evaluate as hf_evaluate
import numpy as np
import sacrebleu
import sklearn.metrics

from lm_eval.api.registry import register_aggregation, register_metric


eval_logger = logging.getLogger("lm-eval")


# Register Aggregations First
@register_aggregation("bypass")
def bypass_agg(arr):
    return 999


@register_aggregation("mean")
def mean(arr):
    return sum(arr) / len(arr)


@register_aggregation("median")
def median(arr):
    return arr[len(arr) // 2]


# Certain metrics must be calculated across all documents in a benchmark.
# We use them as aggregation metrics, paired with no-op passthrough metric fns.
@register_aggregation("perplexity")
def perplexity(items):
    return math.exp(-mean(items))


@register_aggregation("weighted_perplexity")
def weighted_perplexity(items):
    return math.exp(-weighted_mean(items))


@register_aggregation("bits_per_byte")
def bits_per_byte(items):
    return -weighted_mean(items) / math.log(2)


@register_aggregation("f1")
def f1_score(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = sklearn.metrics.f1_score(golds, preds)

    return np.max(fscore)


@register_aggregation("matthews_corrcoef")
def matthews_corrcoef(items):
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    # print(preds)
    return sklearn.metrics.matthews_corrcoef(golds, preds)


@register_aggregation("bleu")
def bleu(items):
    """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
    for evaluating a generated sentence to a reference sentence. It counts matching
    n-grams in the candidate translation to n-grams in the reference text, where
    1-gram or unigram would be each token and a bigram comparison would be each
    word pair. The comparison is made regardless of word order
    Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
    Paper: https://www.aclweb.org/anthology/P02-1040/

    Higher is better
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_bleu(preds, refs).score


@register_aggregation("chrf")
def chrf(items):
    """chrF++ is a tool for automatic evaluation of machine translation output
    based on character n-gram precision and recall enhanced with word n-grams.
    Source: https://github.com/m-popovic/chrF
    Paper: https://www.aclweb.org/anthology/W15-3049.pdf

    Higher is better  # TODO I think
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_chrf(preds, refs).score


@register_aggregation("ter")
def ter(items):
    """Translation Error Rate is an error metric for machine translation that
    measures the number of edits required to change a system output into one
    of the references
    Source: http://www.cs.umd.edu/~snover/tercom/
    Paper: http://mt-archive.info/AMTA-2006-Snover.pdf

    Lower is better
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    refs, preds = _sacreformat(refs, preds)
    return sacrebleu.corpus_ter(preds, refs).score


@register_aggregation("brier_score")
def brier_score(items):
    gold, predictions = list(zip(*items))
    gold = list(gold)
    gold_one_hot = np.eye(np.max(gold) + 1)[gold]
    predictions = list(zip(*items))[1]
    return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1))


@register_metric(
    metric="brier_score",
    higher_is_better=False,
    output_type=["multiple_choice"],
    aggregation="brier_score",
)
def brier_score_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_norm",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice"],
    aggregation="mean",
)
def acc_norm_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_mutual_info",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="mean",
)
def acc_mutual_info_fn(items):  # This is a passthrough function
    return items


exact_match = hf_evaluate.load("exact_match")


@register_metric(
    metric="exact_match",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="mean",
)
def exact_match_fn(**kwargs):
    return exact_match.compute(**kwargs)


@register_metric(
    metric="perplexity",
    higher_is_better=False,
    output_type="loglikelihood",
    aggregation="perplexity",
)
def perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="word_perplexity",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="weighted_perplexity",
)
def word_perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="byte_perplexity",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="weighted_perplexity",
)
def byte_perplexity_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="bits_per_byte",
    higher_is_better=False,
    output_type="loglikelihood_rolling",
    aggregation="bits_per_byte",
)
def bits_per_byte_fn(items):  # This is a passthrough function
    return items


def pop_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))


def sample_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))


def mean_stderr(arr):
    return sample_stddev(arr) / math.sqrt(len(arr))


@register_metric(
    metric="bypass",
    higher_is_better=True,
    output_type=["loglikelihood", "multiple_choice", "generate_until"],
    aggregation="bypass",
)
def bypass(items):
    return None


@register_metric(
    metric="mcc",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="matthews_corrcoef",
)
def mcc_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="f1",
    higher_is_better=True,
    output_type="multiple_choice",
    aggregation="f1",
)
def f1_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="bleu",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="bleu",
)
def bleu_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="chrf",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="chrf",
)
def chrf_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="ter",
    higher_is_better=True,
    output_type="generate_until",
    aggregation="ter",
)
def ter_fn(items):  # This is a passthrough function
    return items


@register_metric(
    metric="acc_all",
    higher_is_better=True,
    output_type="loglikelihood",
    aggregation="mean",
)
def acc_all(items):
    # Only count as correct if all answers are labeled correctly for each question
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]

    for doc, pred in zip(docs, preds):
        paragraph_id = doc["idx"]["paragraph"]
        question_id = doc["idx"]["question"]
        if (paragraph_id, question_id) not in question_scoring_dict:
            question_scoring_dict[(paragraph_id, question_id)] = []

        gold_label = doc["label"] == 1

        question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
    acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
    return acc


def acc_all_stderr(items):
    # Only count as correct if all answers are labeled correctly for each question
    question_scoring_dict = {}
    preds = list(zip(*items))[0]
    docs = list(zip(*items))[1]

    for doc, pred in zip(docs, preds):
        question_id = doc["idx"]["question"]
        if question_id not in question_scoring_dict:
            question_scoring_dict[question_id] = []

        gold_label = doc["label"] == 1
        question_scoring_dict[question_id].append(gold_label == pred)

    acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
    return acc


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def weighted_mean(items):
    a, b = zip(*items)
    return sum(a) / sum(b)


def is_non_str_iterable(obj):
    return isinstance(obj, Iterable) and not isinstance(obj, str)


def _sacreformat(refs, preds):
    """Format refs and preds for sacrebleu corpus calculation. It is very particular"""
    # Sacrebleu expects (List[str], List[List[str])
    # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])

    # Note [ref1_stream] is the first reference for each pred.
    # So lists are size N and (M, N) for N preds and M possible refs for each pred
    # This is a different order of dimensions that I would expect

    # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
    # Must become List[List[str]] with the inner list corresponding to preds
    if not is_non_str_iterable(refs):
        refs = list(refs)
    if not is_non_str_iterable(refs[0]):
        refs = [[ref] for ref in refs]
    refs = list(zip(*refs))
    # Note the number of refs in each ref list much match the number of preds

    # We expect preds to be List[str] or List[List[str]]. Must become List[str]
    if not is_non_str_iterable(preds):
        preds = list(preds)
    if is_non_str_iterable(preds[0]):
        assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
        preds = [pred[0] for pred in preds]

    return refs, preds


# stderr stuff


class _bootstrap_internal:
    def __init__(self, f, n) -> None:
        self.f = f
        self.n = n

    def __call__(self, v):
        i, xs = v
        rnd = random.Random()
        rnd.seed(i)
        res = []
        for _ in range(self.n):
            res.append(self.f(rnd.choices(xs, k=len(xs))))
        return res


def bootstrap_stderr(f, xs, iters):
    import multiprocessing as mp

    pool = mp.Pool(mp.cpu_count())
    # this gives a biased estimate of the stderr (i.e w/ the mean, it gives something
    # equivalent to stderr calculated without Bessel's correction in the stddev.
    # Unfortunately, I haven't been able to figure out what the right correction is
    # to make the bootstrap unbiased - i considered multiplying by sqrt(n/(n-1)) but
    # that would be ad-hoc and I can't prove that that would actually be an unbiased estimator)
    # Thankfully, shouldn't matter because our samples are pretty big usually anyways
    res = []
    chunk_size = min(1000, iters)
    from tqdm import tqdm

    print("bootstrapping for stddev:", f.__name__)
    for bootstrap in tqdm(
        pool.imap(
            _bootstrap_internal(f, chunk_size),
            [(i, xs) for i in range(iters // chunk_size)],
        ),
        total=iters // chunk_size,
    ):
        # sample w replacement
        res.extend(bootstrap)

    pool.close()
    return sample_stddev(res)


def stderr_for_metric(metric, bootstrap_iters):
    bootstrappable = [
        median,
        matthews_corrcoef,
        f1_score,
        perplexity,
        bleu,
        chrf,
        ter,
    ]

    if metric in bootstrappable:
        return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)

    stderr = {mean: mean_stderr, acc_all: acc_all_stderr}

    return stderr.get(metric, None)


def pooled_sample_stderr(stderrs: List[float], sizes: List[int]):
    # Used to aggregate bootstrapped stderrs across subtasks in a group,
    # when we are weighting by the size of each subtask.
    #

    assert len(stderrs) == len(sizes)

    # formula source: https://en.wikipedia.org/wiki/Pooled_variance
    # and: https://stats.stackexchange.com/a/4841331
    # this empirically seems to match running `stderr_for_metric` on all instances
    # from the subtasks concatenated with each other.
    pooled_sample_var = (
        sum([(size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)])
    ) / (sum(sizes) - len(sizes))

    return np.sqrt(pooled_sample_var / sum(sizes))


def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None):
    assert (
        metrics is not None
    ), "Need to pass a list of each subtask's metric for this stderr aggregation"
    assert len(stderrs) == len(sizes) and len(sizes) == len(metrics)

    # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1390 for more documentation.
    # This formula depends on sample means.
    # removed because it seems to give erroneously huge stderrs for groupings of tasks
    # and does not seem to match up with bootstrap-calculated stderrs for groups.

    ### don't use this unless a statistician has told you it's the right thing to do ###

    # accumulators: we'll aggregate pairwise N - 1 times
    variance = stderrs[0] ** 2
    curr_size = sizes[0]
    curr_score = metrics[0]

    for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]):
        curr_score = ((curr_score * curr_size) + (score * size)) / (
            curr_size + size
        )  # NOTE: this assumes our aggregation fn is "mean"

        variance = ((curr_size - 1) * variance + (size - 1) * (stderr**2)) / (
            curr_size + size - 1
        ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * (
            curr_score - score
        ) ** 2

    return np.sqrt(variance)


def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True):
    # A helper function that is used to aggregate
    # subtask scores cross-task.
    # TODO: does not hold for non-mean aggregations
    if not weight_by_size:
        sizes = [1] * len(sizes)

    assert len(metrics) == len(sizes)

    return sum([metric * size for metric, size in zip(metrics, sizes)]) / sum(sizes)
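
A small sketch (not part of the commit; the accuracy values are made up) of how per-document scores flow through the aggregation and stderr helpers above; for `mean`, `stderr_for_metric` falls back to the closed-form `mean_stderr` rather than bootstrapping:

from lm_eval.api.metrics import mean, mean_stderr, stderr_for_metric

per_doc_acc = [1, 0, 1, 1, 0, 1, 1, 1]
print(mean(per_doc_acc))  # 0.75
stderr_fn = stderr_for_metric(mean, bootstrap_iters=1000)
print(stderr_fn(per_doc_acc))  # equivalent to mean_stderr(per_doc_acc)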
lm-evaluation/build/lib/lm_eval/api/model.py ADDED
@@ -0,0 +1,346 @@
import abc
import hashlib
import json
import logging
import os
from typing import List, Optional, Tuple, Type, TypeVar

import transformers
from sqlitedict import SqliteDict
from tqdm import tqdm

from lm_eval import utils


eval_logger = logging.getLogger("lm-eval")

T = TypeVar("T", bound="LM")


class LM(abc.ABC):
    def __init__(self) -> None:
        """Defines the interface that should be implemented by all LM subclasses.
        LMs are assumed to take text (strings) as input and yield strings as output
        (inputs/outputs should be tokenization-agnostic.)

        """
        # set rank and world size to a single process, by default.
        self._rank = 0
        self._world_size = 1
        self.cache_hook = CacheHook(None)

    @abc.abstractmethod
    def loglikelihood(self, requests) -> List[Tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list[Instance]
            A list of Instance objects, with property `args` which returns a tuple (context, continuation).
            `context: str`
                Context string. Implementations of LM must be able to handle an
                empty context string.
            `continuation: str`
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.

        :return: list[tuple[float, bool]]
            A list of pairs (logprob, isgreedy)
            `logprob: float`
                The log probability of `continuation`.
            `isgreedy`:
                Whether `continuation` would be generated by greedy sampling from `context`.
        """
        pass

    @abc.abstractmethod
    def loglikelihood_rolling(self, requests) -> List[Tuple[float]]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
        which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
        multiple chunks, the last input will still have a full-sized context.
        Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: BOS/EOS
            Max context length: 4
            Resulting input/prediction pairs:

                INPUT:  BOS   0   1   2
                PRED:     0   1   2   3

                INPUT:    3   4   5   6
                PRED:     4   5   6   7

                INPUT:    5   6   7   8
                PRED:             8   9

        Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context,).
            string: str
                String for which we are computing overall loglikelihood
        :return: list[tuple[float]]
            A list of tuples (logprob,)
            logprob: float
                The log probability of `context` conditioned on the BOS/EOS token.
                Can also be overridden for custom cases by `prefix_token_id`.
        """
        pass

    # TODO: Add an optional max length
    @abc.abstractmethod
    def generate_until(self, requests) -> List[str]:
        """Generate greedily until a stopping sequence

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context, until).
            context: str
                Context string
            until: [str]
                The string sequences to generate until. These string sequences
                may each span across multiple tokens, or may be part of one token.
        :return: list[str]
            A list of strings continuation
            continuation: str
                The generated continuation.
        """
        pass

    @classmethod
    def create_from_arg_string(
        cls: Type[T], arg_string: str, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument string and additional config.

        Parameters:
        - arg_string: A string containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """
        additional_config = {} if additional_config is None else additional_config
        args = utils.simple_parse_args_string(arg_string)
        args2 = {k: v for k, v in additional_config.items() if v is not None}
        return cls(**args, **args2)

    @classmethod
    def create_from_arg_obj(
        cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None
    ) -> T:
        """
        Creates an instance of the LM class using the given arg_obj

        Parameters:
        - arg_obj: A dict containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """

        additional_config = {} if additional_config is None else additional_config
        additional_config = {
            k: v for k, v in additional_config.items() if v is not None
        }

        return cls(**arg_dict, **additional_config)

    @property
    def rank(self):
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._rank

    @property
    def world_size(self):
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._world_size

    def set_cache_hook(self, cache_hook) -> None:
        self.cache_hook = cache_hook


### SQLite-based caching of LM responses
def hash_args(attr, args):
    dat = json.dumps([attr] + list(args))
    return hashlib.sha256(dat.encode("utf-8")).hexdigest()


class CacheHook:
    def __init__(self, cachinglm) -> None:
        if cachinglm is None:
            self.dbdict = None
            return

        self.dbdict = cachinglm.dbdict

    def add_partial(self, attr, req, res) -> None:
        if self.dbdict is None:
            return
        hsh = hash_args(attr, req)
        self.dbdict[hsh] = res


class CachingLM:
    def __init__(self, lm, cache_db) -> None:
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.

        :param lm: LM
            Underlying LM
        :param cache_db: str
            Path to cache db
        """
        self.lm = lm
        self.cache_db = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr):
        lm_attr = getattr(self.lm, attr)
        if not callable(lm_attr):
            return lm_attr

        def fn(requests):
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(
                f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
            )
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(
                            f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
                        )
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(
                f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
            )
            # actually run the LM on the requests that do not have cached results
            rem_res = getattr(self.lm, attr)(remaining_reqs)

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()

            return res

        return fn

    def get_cache_hook(self):
        return CacheHook(self)


class TemplateLM(LM):
    """
    A class acting as intermediary between the LM base class
    and boilerplate often included in other LM subclasses.
    """

    @property
    @abc.abstractmethod
    def eot_token_id(self):
        pass

    @property
    def prefix_token_id(self):
        # it is used as prefix for loglikelihood
        return self.eot_token_id

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs):
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(self, requests, **kwargs):
        pass

    def _encode_pair(self, context, continuation):
        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        model_class = getattr(self, "AUTO_MODEL_CLASS", None)

        if model_class == transformers.AutoModelForSeq2SeqLM:
            context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
        else:
            whole_enc = self.tok_encode(context + continuation)
            context_enc = self.tok_encode(context)

            context_enc_len = len(context_enc)
            continuation_enc = whole_enc[context_enc_len:]

        return context_enc, continuation_enc

    def loglikelihood(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # BOS or EOS as context
                context_enc, continuation_enc = (
                    [self.prefix_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)

    @abc.abstractmethod
    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> List[Tuple[float, bool]]:
        pass

    @abc.abstractmethod
    def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
        pass
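
A hypothetical sketch (not part of the commit; `EchoLM` and the cache path are made up) of the smallest possible LM subclass, wrapped in the SQLite cache so repeated requests are served from disk:

from lm_eval.api.model import LM, CachingLM


class EchoLM(LM):
    def loglikelihood(self, requests):
        return [(0.0, True) for _ in requests]

    def loglikelihood_rolling(self, requests):
        return [(0.0,) for _ in requests]

    def generate_until(self, requests):
        # echo each context back instead of querying a real model
        return [req.args[0] for req in requests]


lm = CachingLM(EchoLM(), "echo_cache.db")  # responses keyed by sha256 of request args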
lm-evaluation/build/lib/lm_eval/api/registry.py ADDED
@@ -0,0 +1,172 @@
import logging
from typing import Callable, Dict

import evaluate as hf_evaluate

from lm_eval.api.model import LM


eval_logger = logging.getLogger("lm-eval")

MODEL_REGISTRY = {}


def register_model(*names):
    # either pass a list or a single alias.
    # function receives them as a tuple of strings

    def decorate(cls):
        for name in names:
            assert issubclass(
                cls, LM
            ), f"Model '{name}' ({cls.__name__}) must extend LM class"

            assert (
                name not in MODEL_REGISTRY
            ), f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead."

            MODEL_REGISTRY[name] = cls
        return cls

    return decorate


def get_model(model_name):
    try:
        return MODEL_REGISTRY[model_name]
    except KeyError:
        raise ValueError(
            f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {', '.join(MODEL_REGISTRY.keys())}"
        )


TASK_REGISTRY = {}
GROUP_REGISTRY = {}
ALL_TASKS = set()
func2task_index = {}


def register_task(name):
    def decorate(fn):
        assert (
            name not in TASK_REGISTRY
        ), f"task named '{name}' conflicts with existing registered task!"

        TASK_REGISTRY[name] = fn
        ALL_TASKS.add(name)
        func2task_index[fn.__name__] = name
        return fn

    return decorate


def register_group(name):
    def decorate(fn):
        func_name = func2task_index[fn.__name__]
        if name in GROUP_REGISTRY:
            GROUP_REGISTRY[name].append(func_name)
        else:
            GROUP_REGISTRY[name] = [func_name]
            ALL_TASKS.add(name)
        return fn

    return decorate


OUTPUT_TYPE_REGISTRY = {}
METRIC_REGISTRY = {}
METRIC_AGGREGATION_REGISTRY = {}
AGGREGATION_REGISTRY: Dict[str, Callable[[], Dict[str, Callable]]] = {}
HIGHER_IS_BETTER_REGISTRY = {}

DEFAULT_METRIC_REGISTRY = {
    "loglikelihood": [
        "perplexity",
        "acc",
    ],
    "loglikelihood_rolling": ["word_perplexity", "byte_perplexity", "bits_per_byte"],
    "multiple_choice": ["acc", "acc_norm"],
    "generate_until": ["exact_match"],
}


def register_metric(**args):
    # TODO: do we want to enforce a certain interface to registered metrics?
    def decorate(fn):
        assert "metric" in args
        name = args["metric"]

        for key, registry in [
            ("metric", METRIC_REGISTRY),
            ("higher_is_better", HIGHER_IS_BETTER_REGISTRY),
            ("aggregation", METRIC_AGGREGATION_REGISTRY),
        ]:
            if key in args:
                value = args[key]
                assert (
                    value not in registry
                ), f"{key} named '{value}' conflicts with existing registered {key}!"

                if key == "metric":
                    registry[name] = fn
                elif key == "aggregation":
                    registry[name] = AGGREGATION_REGISTRY[value]
                else:
                    registry[name] = value

        return fn

    return decorate


def get_metric(name: str, hf_evaluate_metric=False) -> Callable:
    if not hf_evaluate_metric:
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        else:
            eval_logger.warning(
                f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
            )

    try:
        metric_object = hf_evaluate.load(name)
        return metric_object.compute
    except Exception:
        eval_logger.error(
            f"{name} not found in the evaluate library! Please check https://huggingface.co/evaluate-metric",
        )


def register_aggregation(name: str):
    def decorate(fn):
        assert (
            name not in AGGREGATION_REGISTRY
        ), f"aggregation named '{name}' conflicts with existing registered aggregation!"

        AGGREGATION_REGISTRY[name] = fn
        return fn

    return decorate


def get_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
    try:
        return AGGREGATION_REGISTRY[name]
    except KeyError:
        eval_logger.warning(f"{name} not a registered aggregation metric!")


def get_metric_aggregation(name: str) -> Callable[[], Dict[str, Callable]]:
    try:
        return METRIC_AGGREGATION_REGISTRY[name]
    except KeyError:
        eval_logger.warning(f"{name} metric is not assigned a default aggregation!")


def is_higher_better(metric_name) -> bool:
    try:
        return HIGHER_IS_BETTER_REGISTRY[metric_name]
    except KeyError:
        eval_logger.warning(
            f"higher_is_better not specified for metric '{metric_name}'!"
        )
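
A brief sketch (not part of the commit; the "echo" aliases are hypothetical) of registering an LM subclass under one or more aliases and resolving it back by name:

from lm_eval.api.model import LM
from lm_eval.api.registry import register_model, get_model


@register_model("echo", "echo-lm")
class EchoLM(LM):
    def loglikelihood(self, requests): ...
    def loglikelihood_rolling(self, requests): ...
    def generate_until(self, requests): ...


assert get_model("echo") is EchoLM  # both aliases map to the same class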
lm-evaluation/build/lib/lm_eval/api/samplers.py ADDED
@@ -0,0 +1,114 @@
class ContextSampler:
    def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None:
        self.rnd = rnd
        assert self.rnd, "must pass rnd to FewShotSampler!"

        self.task = task
        self.config = task._config

        self.target_delimiter = self.config.target_delimiter
        self.fewshot_delimiter = self.config.fewshot_delimiter

        self.doc_to_text = self.task.doc_to_text
        self.doc_to_target = self.task.doc_to_target
        self.doc_to_choice = self.task.doc_to_choice

        self.docs = docs  # HF dataset split, provided by task._fewshot_docs()
        if fewshot_indices:  # subset few-shot docs from
            self.docs = self.docs.select(fewshot_indices)

    def get_context(self, doc, num_fewshot):
        # draw an extra fewshot sample if using same split as evaluating on
        n_samples = (
            num_fewshot + 1
            if self.config.fewshot_split == self.config.test_split
            else num_fewshot
        )

        # draw `n_samples` docs from fewshot_docs
        fewshotex = self.sample(n_samples)

        # get rid of the doc that's the one we're evaluating, if it's in the fewshot
        # TODO: should we just stop people from using fewshot from same split as evaluating?
        selected_docs = [x for x in fewshotex if x != doc][:num_fewshot]

        labeled_examples = (
            self.fewshot_delimiter.join(
                [
                    # TODO: is separating doc_to_text and doc_to_target by one space always desired?
                    (
                        self.doc_to_text(doc)
                        if (
                            self.config.doc_to_choice is None
                            or isinstance(self.doc_to_text(doc), str)
                        )
                        else self.doc_to_choice(doc)[self.doc_to_text(doc)]
                    )
                    + self.target_delimiter
                    + (
                        str(self.doc_to_target(doc)[0])
                        if isinstance(self.doc_to_target(doc), list)
                        else self.doc_to_target(doc)
                        if (
                            self.config.doc_to_choice is None
                            or isinstance(self.doc_to_target(doc), str)
                        )
                        else str(self.doc_to_choice(doc)[self.doc_to_target(doc)])
                    )
                    for doc in selected_docs
                ]
            )
            + self.fewshot_delimiter
        )

        return labeled_examples

    def sample(self, n):
        """
        Draw `n` samples from our fewshot docs. This method should be overridden by subclasses.
        """

        return self.rnd.sample(self.docs, n)


class FirstNSampler(ContextSampler):
    def sample(self, n) -> None:
        """
        Draw the first `n` samples in order from the specified split.
        Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU.
        """
        assert (
            n <= len(self.docs)
        ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available."
        return self.docs[:n]


class BalancedSampler(ContextSampler):
    def sample(self, n) -> None:
        """
        TODO: this should return approximately class-balanced samples from our fewshot examples.
        TODO: what order should they be in? maybe random?
        """

        pass


class ManualSampler(ContextSampler):
    def sample(self, n) -> None:
        """ """
        pass


SAMPLER_REGISTRY = {
    "default": ContextSampler,
    "first_n": FirstNSampler,
}


def get_sampler(name):
    try:
        return SAMPLER_REGISTRY[name]
    except KeyError:
        raise ValueError(
            f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}"
        )
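
A sketch (not part of the commit; the variables `fewshot_docs` and `task` are assumed to exist) of resolving a sampler class by name, as a task's fewshot_config would:

import random
from lm_eval.api.samplers import get_sampler

sampler_cls = get_sampler("first_n")  # -> FirstNSampler
# A task would then construct it with its few-shot docs and a seeded RNG:
# sampler = sampler_cls(docs=fewshot_docs, task=task, rnd=random.Random(1234))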
lm-evaluation/build/lib/lm_eval/api/task.py ADDED
@@ -0,0 +1,1498 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import ast
3
+ import logging
4
+ import random
5
+ import re
6
+ from collections.abc import Callable
7
+ from copy import deepcopy
8
+ from dataclasses import asdict, dataclass
9
+ from inspect import getsource
10
+ from typing import (
11
+ Any,
12
+ Dict,
13
+ Iterable,
14
+ Iterator,
15
+ List,
16
+ Literal,
17
+ Mapping,
18
+ Optional,
19
+ Tuple,
20
+ Union,
21
+ )
22
+
23
+ import datasets
24
+ import numpy as np
25
+ from tqdm import tqdm
26
+
27
+ from lm_eval import utils
28
+ from lm_eval.api import samplers
29
+ from lm_eval.api.instance import Instance, OutputType
30
+ from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity
31
+ from lm_eval.api.registry import (
32
+ AGGREGATION_REGISTRY,
33
+ DEFAULT_METRIC_REGISTRY,
34
+ get_aggregation,
35
+ get_metric,
36
+ get_metric_aggregation,
37
+ is_higher_better,
38
+ )
39
+ from lm_eval.caching.cache import load_from_cache, save_to_cache
40
+ from lm_eval.filters import build_filter_ensemble
41
+ from lm_eval.prompts import get_prompt
42
+
43
+
44
+ ALL_OUTPUT_TYPES = [
45
+ "loglikelihood",
46
+ "multiple_choice",
47
+ "loglikelihood_rolling",
48
+ "generate_until",
49
+ ]
50
+
51
+ eval_logger = logging.getLogger("lm-eval")
52
+
53
+
54
+ @dataclass
55
+ class TaskConfig(dict):
56
+ # task naming/registry
57
+ task: Optional[str] = None
58
+ task_alias: Optional[str] = None
59
+ group: Optional[Union[str, list]] = None
60
+ group_alias: Optional[Union[str, list]] = None
61
+ # HF dataset options.
62
+ # which dataset to use,
63
+ # and what splits for what purpose
64
+ dataset_path: Optional[str] = None
65
+ dataset_name: Optional[str] = None
66
+ dataset_kwargs: Optional[dict] = None
67
+ training_split: Optional[str] = None
68
+ validation_split: Optional[str] = None
69
+ test_split: Optional[str] = None
70
+ fewshot_split: Optional[
71
+ str
72
+ ] = None # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?)
73
+ # formatting / prompting options.
74
+ # see docs/advanced_task_guide.md for more info
75
+ process_docs: Optional[Callable] = None
76
+ doc_to_text: Optional[Union[Callable, str]] = None
77
+ doc_to_target: Optional[Union[Callable, str]] = None
78
+ doc_to_choice: Optional[Union[Callable, str, dict, list]] = None
79
+ process_results: Optional[Union[Callable, str]] = None
80
+ use_prompt: Optional[str] = None
81
+ description: str = ""
82
+ target_delimiter: str = " "
83
+ fewshot_delimiter: str = "\n\n"
84
+ fewshot_config: Optional[dict] = None
85
+ # runtime configuration options
86
+ num_fewshot: Optional[int] = None
87
+ # scoring options
88
+ metric_list: Optional[list] = None
89
+ output_type: OutputType = "generate_until"
90
+ generation_kwargs: Optional[dict] = None
91
+ repeats: int = 1
92
+ filter_list: Optional[Union[str, list]] = None
93
+ should_decontaminate: bool = False
94
+ doc_to_decontamination_query: Optional[str] = None
95
+ metadata: Optional[
96
+ dict
97
+ ] = None # by default, not used in the code. allows for users to pass arbitrary info to tasks
98
+
99
+ def __post_init__(self) -> None:
100
+ if self.generation_kwargs is not None:
101
+ if self.output_type != "generate_until":
102
+ raise ValueError(
103
+ f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!"
104
+ )
105
+
106
+ if "temperature" in self.generation_kwargs:
107
+ self.generation_kwargs["temperature"] = float(
108
+ self.generation_kwargs["temperature"]
109
+ )
110
+
111
+ if "until" not in self.generation_kwargs:
112
+ self.generation_kwargs["until"] = [self.fewshot_delimiter]
113
+ else:
114
+ if self.output_type == "generate_until":
115
+ # ensure that we greedily generate in absence of explicit arguments otherwise
116
+ self.generation_kwargs = {
117
+ "until": (
118
+ None
119
+ if self.fewshot_delimiter is None
120
+ else [self.fewshot_delimiter]
121
+ ),
122
+ "do_sample": False,
123
+ }
124
+
125
+ def __getitem__(self, item):
126
+ return getattr(self, item)
127
+
128
+ def __setitem__(self, item, value):
129
+ return setattr(self, item, value)
130
+
131
+ def to_dict(self, keep_callable: bool = False) -> dict:
132
+ """dumps the current config as a dictionary object, as a printable format.
133
+ null fields will not be printed.
134
+ Used for dumping results alongside full task configuration
135
+
136
+ :return: dict
137
+ A printable dictionary version of the TaskConfig object.
138
+
139
+ # TODO: should any default value in the TaskConfig not be printed?
140
+ """
141
+ cfg_dict = asdict(self)
142
+ # remove values that are `None`
143
+ for k, v in list(cfg_dict.items()):
144
+ if v is None:
145
+ cfg_dict.pop(k)
146
+ elif k == "metric_list":
147
+ for metric_dict in v:
148
+ for metric_key, metric_value in metric_dict.items():
149
+ if callable(metric_value):
150
+ metric_dict[metric_key] = self.serialize_function(
151
+ metric_value, keep_callable=keep_callable
152
+ )
153
+ cfg_dict[k] = v
154
+ elif callable(v):
155
+ cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable)
156
+ return cfg_dict
157
+
158
+ def serialize_function(
159
+ self, value: Union[Callable, str], keep_callable=False
160
+ ) -> Union[Callable, str]:
161
+ """Serializes a given function or string.
162
+
163
+ If 'keep_callable' is True, the original callable is returned.
164
+ Otherwise, attempts to return the source code of the callable using 'getsource'.
165
+ """
166
+ if keep_callable:
167
+ return value
168
+ else:
169
+ try:
170
+ return getsource(value)
171
+ except (TypeError, OSError):
172
+ return str(value)
173
+
174
+
175
+ class Task(abc.ABC):
176
+ """A task represents an entire benchmark including its dataset, problems,
177
+ answers, and evaluation methods. See BoolQ for a simple example implementation
178
+
179
+ A `doc` can be any python object which represents one instance of evaluation.
180
+ This is usually a dictionary e.g.
181
+ {"question": ..., "answer": ...} or
182
+ {"question": ..., question, answer)
183
+ """
184
+
185
+ VERSION: Optional[Union[int, str]] = None
186
+
187
+ # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
188
+ # or a path to a custom `datasets` loading script.
189
+ DATASET_PATH: Optional[str] = None
190
+
191
+ # The name of a subset within `DATASET_PATH`.
192
+ DATASET_NAME: Optional[str] = None
193
+
194
+ OUTPUT_TYPE: Optional[OutputType] = None
195
+
196
+ def __init__(
197
+ self,
198
+ data_dir: Optional[str] = None,
199
+ cache_dir: Optional[str] = None,
200
+ download_mode: Optional[datasets.DownloadMode] = None,
201
+ config: Optional[Mapping] = None, # Union[dict, TaskConfig]
202
+ ) -> None:
203
+ """
204
+ :param data_dir: str
205
+ Stores the path to a local folder containing the `Task`'s data files.
206
+ Use this to specify the path to manually downloaded data (usually when
207
+ the dataset is not publicly accessible).
208
+ :param cache_dir: str
209
+ The directory to read/write the `Task` dataset. This follows the
210
+ HuggingFace `datasets` API with the default cache directory located at:
211
+ `~/.cache/huggingface/datasets`
212
+ NOTE: You can change the cache location globally for a given process
213
+ to another directory:
214
+ `export HF_DATASETS_CACHE="/path/to/another/directory"`
215
+ :param download_mode: datasets.DownloadMode
216
+ How to treat pre-existing `Task` downloads and data.
217
+ - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
218
+ Reuse download and reuse dataset.
219
+ - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
220
+ Reuse download with fresh dataset.
221
+ - `datasets.DownloadMode.FORCE_REDOWNLOAD`
222
+ Fresh download and fresh dataset.
223
+ """
224
+ self.download(data_dir, cache_dir, download_mode)
225
+ self._training_docs: Optional[list] = None
226
+ self._fewshot_docs: Optional[list] = None
227
+ self._instances: Optional[List[Instance]] = None
228
+
229
+ self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig()
230
+
231
+ self._filters = [build_filter_ensemble("none", [["take_first", None]])]
232
+
233
+ def download(
234
+ self,
235
+ data_dir: Optional[str] = None,
236
+ cache_dir: Optional[str] = None,
237
+ download_mode=None,
238
+ ) -> None:
239
+ """Downloads and returns the task dataset.
240
+ Override this method to download the dataset from a custom API.
241
+
242
+ :param data_dir: str
243
+ Stores the path to a local folder containing the `Task`'s data files.
244
+ Use this to specify the path to manually downloaded data (usually when
245
+ the dataset is not publicly accessible).
246
+ :param cache_dir: str
247
+ The directory to read/write the `Task` dataset. This follows the
248
+ HuggingFace `datasets` API with the default cache directory located at:
249
+ `~/.cache/huggingface/datasets`
250
+ NOTE: You can change the cache location globally for a given process
251
+ by setting the shell environment variable, `HF_DATASETS_CACHE`,
252
+ to another directory:
253
+ `export HF_DATASETS_CACHE="/path/to/another/directory"`
254
+ :param download_mode: datasets.DownloadMode
255
+ How to treat pre-existing `Task` downloads and data.
256
+ - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
257
+ Reuse download and reuse dataset.
258
+ - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
259
+ Reuse download with fresh dataset.
260
+ - `datasets.DownloadMode.FORCE_REDOWNLOAD`
261
+ Fresh download and fresh dataset.
262
+ """
263
+ self.dataset = datasets.load_dataset(
264
+ path=self.DATASET_PATH,
265
+ name=self.DATASET_NAME,
266
+ data_dir=data_dir,
267
+ cache_dir=cache_dir,
268
+ download_mode=download_mode,
269
+ )
270
+
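As a usage note (hypothetical names; the abstract methods are elided): a concrete subclass only needs to point `DATASET_PATH`/`DATASET_NAME` at a Hub dataset for this default `download` to populate `self.dataset`:

```python
# Minimal sketch of a Task subclass relying on the default download().
class BoolQTask(Task):
    VERSION = 0
    DATASET_PATH = "super_glue"  # assumed Hub dataset id; any `datasets` path works
    DATASET_NAME = "boolq"
    # has_training_docs / doc_to_text / doc_to_target etc. omitted for brevity
```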
271
+ @property
272
+ def config(self) -> TaskConfig:
273
+ """Returns the TaskConfig associated with this class."""
274
+ return self._config
275
+
276
+ @abc.abstractmethod
277
+ def has_training_docs(self):
278
+ """Whether the task has a training set"""
279
+ pass
280
+
281
+ @abc.abstractmethod
282
+ def has_validation_docs(self):
283
+ """Whether the task has a validation set"""
284
+ pass
285
+
286
+ @abc.abstractmethod
287
+ def has_test_docs(self):
288
+ """Whether the task has a test set"""
289
+ pass
290
+
291
+ def training_docs(self) -> Iterable:
292
+ """
293
+ :return: Iterable[obj]
294
+ An iterable of any object that doc_to_text can handle
295
+ """
296
+ return []
297
+
298
+ def validation_docs(self) -> Iterable:
299
+ """
300
+ :return: Iterable[obj]
301
+ An iterable of any object that doc_to_text can handle
302
+ """
303
+ return []
304
+
305
+ def test_docs(self) -> Iterable:
306
+ """
307
+ :return: Iterable[obj]
308
+ An iterable of any object that doc_to_text can handle
309
+ """
310
+ return []
311
+
312
+ def fewshot_docs(self) -> Iterable:
313
+ """
314
+ :return: Iterable[obj]
315
+ An iterable of any object that doc_to_text can handle
316
+ """
317
+ if self.has_training_docs():
318
+ return self.training_docs()
319
+ elif self.has_validation_docs():
320
+ return self.validation_docs()
321
+ else:
322
+ eval_logger.warning(
323
+ f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False"
324
+ ", using test_docs as fewshot_docs but this is not recommended."
325
+ )
326
+ return self.test_docs()
327
+
328
+ def _process_doc(self, doc: dict) -> dict:
329
+ """
330
+ Override this to process (detokenize, strip, replace, etc.) individual
331
+ documents. This can be used in a map over documents of a data split.
332
+ E.g. `map(self._process_doc, self.dataset["validation"])`
333
+
334
+ :return: dict
335
+ The processed version of the specified `doc`.
336
+ """
337
+ return doc
338
+
339
+ @property
340
+ def instances(self) -> List[Instance]:
341
+ """After calling `task.build_all_requests()`, tasks
342
+ maintain a list of the dataset instances which will be evaluated.
343
+ """
344
+ return self._instances
345
+
346
+ def fewshot_examples(self, k, rnd):
347
+ if self._training_docs is None:
348
+ self._training_docs = list(self.training_docs())
349
+
350
+ return rnd.sample(self._training_docs, k)
351
+
352
+ def doc_to_decontamination_query(self, doc):
353
+ raise NotImplementedError(
354
+ "Override doc_to_decontamination_query with document specific decontamination query."
355
+ )
356
+
357
+ @abc.abstractmethod
358
+ def doc_to_text(self, doc):
359
+ pass
360
+
361
+ @abc.abstractmethod
362
+ def doc_to_target(self, doc):
363
+ pass
364
+
365
+ def build_all_requests(
366
+ self,
367
+ *,
368
+ limit=None,
369
+ rank=None,
370
+ world_size=None,
371
+ cache_requests=False,
372
+ rewrite_requests_cache=False,
373
+ ) -> None:
374
+ """Build a set of Instances for a task, and store them in task.instances"""
375
+
376
+ # used with caching
377
+ og_limit = limit
378
+
379
+ cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}"
380
+
381
+ cached_instances = load_from_cache(file_name=cache_key)
382
+
383
+ if cache_requests and cached_instances and not rewrite_requests_cache:
384
+ cached_instances = cached_instances[:limit]
385
+
386
+ flattened_instances = [
387
+ instance
388
+ for instance_group in cached_instances
389
+ for instance in instance_group
390
+ ]
391
+
392
+ self._instances = flattened_instances
393
+ return
394
+
395
+ eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...")
396
+
397
+ instances = []
398
+
399
+ # process all documents when caching is specified for simplicity
400
+ if (
401
+ cache_requests
402
+ and (not cached_instances or rewrite_requests_cache)
403
+ and limit is not None
404
+ ):
405
+ limit = None
406
+
407
+ doc_id_docs = list(
408
+ self.doc_iterator(rank=rank, limit=limit, world_size=world_size)
409
+ )
410
+
411
+ num_docs = len(doc_id_docs)
412
+
413
+ for doc_id, doc in tqdm(
414
+ doc_id_docs,
415
+ total=num_docs,
416
+ ):
417
+ # sample fewshot context #TODO: need to offset doc_id by rank now!
418
+ fewshot_ctx = self.fewshot_context(
419
+ doc,
420
+ 0 if self.config.num_fewshot is None else self.config.num_fewshot,
421
+ )
422
+
423
+ # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute
424
+ inst = self.construct_requests(
425
+ doc=doc,
426
+ ctx=fewshot_ctx,
427
+ metadata=(self.config["task"], doc_id, self.config.repeats),
428
+ )
429
+
430
+ if not isinstance(inst, list):
431
+ inst = [inst]
432
+
433
+ instances.append(inst)
434
+
435
+ # now flatten, this is to allow slicing to work with pickles
436
+
437
+ sliced_instances = instances[:og_limit]
438
+
439
+ flattened_instances = [
440
+ instance
441
+ for instance_group in sliced_instances
442
+ for instance in instance_group
443
+ ]
444
+
445
+ self._instances = flattened_instances
446
+
447
+ if len(self._instances) == 0:
448
+ raise ValueError("task.build_all_requests() did not find any docs!")
449
+
450
+ if cache_requests and (not cached_instances or rewrite_requests_cache):
451
+ save_to_cache(file_name=cache_key, obj=instances)
452
+
453
+ @abc.abstractmethod
454
+ def construct_requests(self, doc, ctx, **kwargs):
455
+ """Uses RequestFactory to construct Requests and returns an iterable of
456
+ Requests which will be sent to the LM.
457
+
458
+ :param doc:
459
+ The document as returned from training_docs, validation_docs, or test_docs.
460
+ :param ctx: str
461
+ The context string, generated by fewshot_context. This includes the natural
462
+ language description, as well as the few shot examples, and the question
463
+ part of the document for `doc`.
464
+ :param doc_idx: int
465
+ The index of a document within `self.test_docs()` or `self.validation_docs()`,
466
+ whichever is the main split used.
467
+ :param repeats: int
468
+ TODO: update this docstring
469
+ The number of times each instance in a dataset is inferred on. Defaults to 1,
470
+ can be increased for techniques like majority voting.
471
+ """
472
+ pass
473
+
474
+ @abc.abstractmethod
475
+ def process_results(self, doc, results):
476
+ """Take a single document and the LM results and evaluates, returning a
477
+ dict where keys are the names of submetrics and values are the values of
478
+ the metric for that one document
479
+
480
+ :param doc:
481
+ The document as returned from training_docs, validation_docs, or test_docs.
482
+ :param results:
483
+ The results of the requests created in construct_requests.
484
+ """
485
+ pass
486
+
487
+ @abc.abstractmethod
488
+ def aggregation(self):
489
+ """
490
+ :returns: {str: [metric_score] -> float}
491
+ A dictionary where keys are the names of submetrics and values are
492
+ functions that aggregate a list of metric scores
493
+ """
494
+ pass
495
+
496
+ @abc.abstractmethod
497
+ def higher_is_better(self):
498
+ """
499
+ :returns: {str: bool}
500
+ A dictionary where keys are the names of submetrics and values are
501
+ whether a higher value of the submetric is better
502
+ """
503
+ pass
504
+
505
+ def get_config(self, key: str) -> Any:
506
+ return getattr(self._config, key, None)
507
+
508
+ @classmethod
509
+ def count_bytes(cls, doc):
510
+ """Used for byte-level perplexity metrics in rolling loglikelihood"""
511
+ return len(doc.encode("utf-8"))
512
+
513
+ @classmethod
514
+ def count_words(cls, doc):
515
+ """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!"""
516
+ return len(re.split(r"\s+", doc))
517
+
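These two normalizers feed the rolling-loglikelihood perplexity metrics; a quick sanity check of their semantics (byte counts are UTF-8, words are whitespace-delimited):

```python
>>> Task.count_bytes("héllo world")   # 10 ASCII bytes + 2 bytes for "é"
12
>>> Task.count_words("héllo world")
2
```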
518
+ @utils.positional_deprecated
519
+ def fewshot_context(
520
+ self,
521
+ doc,
522
+ num_fewshot,
523
+ rnd=random.Random(1234),
524
+ description=None,
525
+ ):
526
+ """Returns a fewshot context string that is made up of a prepended description
527
+ (if provided), the `num_fewshot` number of examples, and an appended prompt example.
528
+
529
+ :param doc: str
530
+ The document as returned from training_docs, validation_docs, or test_docs.
531
+ :param num_fewshot: int
532
+ The number of fewshot examples to provide in the returned context string.
533
+ :param rnd: random.Random
534
+ The pseudo-random number generator used to randomly sample examples.
535
+ WARNING: a non-None generator is required; passing `None` raises a ValueError.
536
+ :param description: str
537
+ The task's description that will be prepended to the fewshot examples.
538
+ :returns: str
539
+ The fewshot context.
540
+ """
541
+ if rnd is None:
542
+ raise ValueError(
543
+ "A `random.Random` generator argument must be provided to `rnd`"
544
+ )
545
+
546
+ description = description if description else ""
547
+
548
+ if num_fewshot == 0:
549
+ labeled_examples = ""
550
+ else:
551
+ # for sets with no training docs, draw from other set *but ensure no overlap with current doc*
552
+ if self.has_training_docs():
553
+ fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
554
+ else:
555
+ if self._fewshot_docs is None:
556
+ self._fewshot_docs = list(
557
+ self.validation_docs()
558
+ if self.has_validation_docs()
559
+ else self.test_docs()
560
+ )
561
+
562
+ fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)
563
+
564
+ # get rid of the doc that's the one we're evaluating, if it's in the fewshot
565
+ fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
566
+
567
+ labeled_examples = (
568
+ "\n\n".join(
569
+ [
570
+ self.doc_to_text(doc) + self.doc_to_target(doc)
571
+ for doc in fewshotex
572
+ ]
573
+ )
574
+ + "\n\n"
575
+ )
576
+
577
+ example = self.doc_to_text(doc)
578
+ return description + labeled_examples + example
579
+
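Concretely, the returned context is `description + shots + query`, with shots joined by a blank line; a minimal sketch with made-up strings:

```python
description = "Answer the question.\n\n"
shots = [("Q: 2+2?\nA:", " 4"), ("Q: 3+3?\nA:", " 6")]
labeled_examples = "\n\n".join(text + target for text, target in shots) + "\n\n"
context = description + labeled_examples + "Q: 5+5?\nA:"
```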
580
+ def apply_filters(self) -> Optional[List[Instance]]:
581
+ """Iterates over FilterEnsembles and applies them to instances"""
582
+ if hasattr(self, "_filters"):
583
+ for f in self._filters:
584
+ f.apply(self._instances)
585
+ else:
586
+ eval_logger.warning("No filter defined, passing through instances")
587
+ return self._instances
588
+
589
+ def dump_config(self) -> dict:
590
+ """Returns the config as a dictionary."""
591
+ # TODO: this should only return the overrides applied to a non-YAML task's configuration.
592
+ # (num_fewshot)
593
+ return self.config.to_dict()
594
+
595
+ def set_config(self, key: str, value: Any, update: bool = False) -> None:
596
+ """Set or update the configuration for a given key."""
597
+ if key is None:
598
+ raise ValueError("Key must be provided.")
599
+
600
+ if update:
601
+ current_value = getattr(self._config, key, {})
602
+ if not isinstance(current_value, dict):
603
+ raise TypeError(
604
+ f"Expected a dict for key '{key}', got {type(current_value).__name__} instead."
605
+ )
606
+ current_value.update(value)
607
+ else:
608
+ setattr(self._config, key, value)
609
+
610
+ def override_metric(self, metric_name: str) -> None:
611
+ """
612
+ Override the default metrics used for evaluation with custom metrics.
613
+
614
+ Parameters:
615
+ - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics.
616
+ """
617
+ (
618
+ self._metric_fn_list,
619
+ self._aggregation_list,
620
+ self._metric_fn_kwargs,
621
+ self._higher_is_better,
622
+ ) = ({}, {}, {}, {})
623
+ self._metric_fn_list[metric_name] = get_metric(metric_name)
624
+ self._aggregation_list[metric_name] = get_metric_aggregation(metric_name)
625
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
626
+ self._metric_fn_kwargs[metric_name] = {}
627
+ if not isinstance(self, ConfigurableTask):
628
+ self.process_results = lambda x, y: {metric_name: get_metric(metric_name)}
629
+ self.aggregation = lambda: {
630
+ metric_name: get_metric_aggregation(metric_name)
631
+ }
632
+ setattr(self._config, "metric_list", [{"metric": metric_name}])
633
+ setattr(self._config, "process_results", None)
634
+
635
+ @property
636
+ def eval_docs(self) -> Union[datasets.Dataset, List[dict]]:
637
+ if self.has_test_docs():
638
+ return self.test_docs()
639
+ elif self.has_validation_docs():
640
+ return self.validation_docs()
641
+ else:
642
+ raise ValueError(
643
+ f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!"
644
+ )
645
+
646
+ def doc_iterator(
647
+ self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1
648
+ ) -> Iterator[Tuple[int, Any]]:
649
+ limit = int(limit) if limit else None
650
+ doc_iterator = utils.create_iterator(
651
+ enumerate(self.eval_docs),
652
+ rank=int(rank),
653
+ limit=limit,
654
+ world_size=int(world_size),
655
+ )
656
+ return doc_iterator
657
+
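`utils.create_iterator` is, in effect, a strided slice over the enumerated docs, which is how each rank in a `world_size`-way data-parallel run gets a disjoint shard. An assumed minimal equivalent (not the verbatim helper):

```python
from itertools import islice

def strided_shard(raw_iterator, rank=0, world_size=1, limit=None):
    # hypothetical stand-in for utils.create_iterator
    return islice(raw_iterator, rank, limit, world_size)

list(strided_shard(iter(enumerate("abcdef")), rank=1, world_size=2))
# -> [(1, 'b'), (3, 'd'), (5, 'f')]
```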
658
+
659
+ class ConfigurableTask(Task):
660
+ VERSION = "Yaml"
661
+ OUTPUT_TYPE = None
662
+ CONFIG = None
663
+
664
+ def __init__(
665
+ self,
666
+ data_dir=None,
667
+ cache_dir=None,
668
+ download_mode=None,
669
+ config: Optional[dict] = None,
670
+ ) -> None: # TODO no super() call here
671
+ # Get pre-configured attributes
672
+ self._config = self.CONFIG
673
+
674
+ # Use new configurations if there was no preconfiguration
675
+ if self.config is None:
676
+ self._config = TaskConfig(**config)
677
+ # Overwrite configs
678
+ else:
679
+ if config is not None:
680
+ self._config.__dict__.update(config)
681
+
682
+ if self.config is None:
683
+ raise ValueError(
684
+ "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg"
685
+ )
686
+
687
+ if isinstance(self.config.metadata, dict):
688
+ if "version" in self.config.metadata:
689
+ self.VERSION = self.config.metadata["version"]
690
+
691
+ if self.config.output_type is not None:
692
+ if self.config.output_type not in ALL_OUTPUT_TYPES:
693
+ raise ValueError(
694
+ f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'"
695
+ )
696
+ self.OUTPUT_TYPE = self.config.output_type
697
+
698
+ if self.config.dataset_path is not None:
699
+ self.DATASET_PATH = self.config.dataset_path
700
+
701
+ if self.config.dataset_name is not None:
702
+ self.DATASET_NAME = self.config.dataset_name
703
+
704
+ self._metric_fn_list = {}
705
+ self._metric_fn_kwargs = {}
706
+ self._aggregation_list = {}
707
+ self._higher_is_better = {}
708
+
709
+ if self.config.metric_list is None:
710
+ # TODO: handle this in TaskConfig.__post_init__ ?
711
+ _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type]
712
+
713
+ for metric_name in _metric_list:
714
+ self._metric_fn_list[metric_name] = get_metric(metric_name)
715
+ self._metric_fn_kwargs[metric_name] = {}
716
+ self._aggregation_list[metric_name] = get_metric_aggregation(
717
+ metric_name
718
+ )
719
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
720
+ else:
721
+ for metric_config in self.config.metric_list:
722
+ if "metric" not in metric_config:
723
+ raise ValueError(
724
+ "'metric' key not provided for an entry in 'metric_list', must be specified!"
725
+ )
726
+ metric_name = metric_config["metric"]
727
+ kwargs = {
728
+ key: metric_config[key]
729
+ for key in metric_config
730
+ if key
731
+ not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"]
732
+ }
733
+ hf_evaluate_metric = (
734
+ "hf_evaluate" in metric_config
735
+ and metric_config["hf_evaluate"] is True
736
+ )
737
+
738
+ if self.config.process_results is not None:
739
+ self._metric_fn_list[metric_name] = None
740
+ self._metric_fn_kwargs[metric_name] = {}
741
+ elif callable(metric_name):
742
+ metric_fn = metric_name.__call__
743
+ metric_name = metric_name.__name__
744
+ self._metric_fn_list[metric_name] = metric_fn
745
+ self._metric_fn_kwargs[metric_name] = kwargs
746
+ else:
747
+ self._metric_fn_list[metric_name] = get_metric(
748
+ metric_name, hf_evaluate_metric
749
+ )
750
+ self._metric_fn_kwargs[metric_name] = kwargs
751
+
752
+ if "aggregation" in metric_config:
753
+ agg_name = metric_config["aggregation"]
754
+ if isinstance(agg_name, str):
755
+ self._aggregation_list[metric_name] = get_aggregation(agg_name)
756
+ elif callable(agg_name): # noqa: E721
757
+ self._aggregation_list[metric_name] = metric_config[
758
+ "aggregation"
759
+ ]
760
+ else:
761
+ INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()}
762
+ metric_agg = get_metric_aggregation(metric_name)
763
+ eval_logger.warning(
764
+ f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. "
765
+ f"using default "
766
+ f"aggregation={INV_AGG_REGISTRY[metric_agg]}"
767
+ )
768
+ self._aggregation_list[metric_name] = metric_agg
769
+
770
+ if "higher_is_better" in metric_config:
771
+ self._higher_is_better[metric_name] = metric_config[
772
+ "higher_is_better"
773
+ ]
774
+ else:
775
+ eval_logger.warning(
776
+ f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. "
777
+ f"using default "
778
+ f"higher_is_better={is_higher_better(metric_name)}"
779
+ )
780
+ self._higher_is_better[metric_name] = is_higher_better(metric_name)
781
+
782
+ self.download(self.config.dataset_kwargs)
783
+ self._training_docs = None
784
+ self._fewshot_docs = None
785
+
786
+ if self.config.filter_list is not None:
787
+ self._filters = []
788
+ for filter_config in self.config.filter_list:
789
+ filter_name = filter_config["name"]
790
+ filter_functions = filter_config["filter"]
791
+ components = []
792
+ for function in filter_functions:
793
+ kwargs = {
794
+ key: function[key] for key in function if key != "function"
795
+ }
796
+ components.append([function["function"], kwargs])
797
+ filter_pipeline = build_filter_ensemble(filter_name, components)
798
+ self._filters.append(filter_pipeline)
799
+ else:
800
+ self._filters = [build_filter_ensemble("none", [["take_first", None]])]
801
+
802
+ if self.config.use_prompt is not None:
803
+ eval_logger.info(f"loading prompt {self.config.use_prompt}")
804
+ self.prompt = get_prompt(
805
+ self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME
806
+ )
807
+ else:
808
+ self.prompt = None
809
+
810
+ if self.fewshot_docs() is not None:
811
+ self.sampler = samplers.get_sampler(
812
+ self.config.fewshot_config.get("sampler", "default")
813
+ if self.config.fewshot_config
814
+ else "default"
815
+ )(list(self.fewshot_docs()), self, rnd=random.Random(1234))
816
+
817
+ self.task_docs = self.eval_docs
818
+
819
+ # Test One Doc
820
+ self.features = list(self.task_docs.features.keys())
821
+ self.multiple_input = 0
822
+ self.multiple_target = 0
823
+ test_doc = self.task_docs[0]
824
+ test_text = self.doc_to_text(test_doc)
825
+ test_target = self.doc_to_target(test_doc)
826
+
827
+ if self.config.doc_to_choice is not None:
828
+ test_choice = self.doc_to_choice(test_doc)
829
+ if not isinstance(test_choice, list):
830
+ eval_logger.error("doc_to_choice must return list")
831
+ else:
832
+ num_choice = len(test_choice)
833
+
834
+ if isinstance(test_text, int):
835
+ self.multiple_input = num_choice
836
+ else:
837
+ test_choice = None
838
+
839
+ if isinstance(test_target, list):
840
+ self.multiple_target = len(test_target)
841
+ else:
842
+ if (isinstance(test_target, int)) and (test_choice is not None):
843
+ test_target = test_choice[test_target]
844
+ else:
845
+ test_target = str(test_target)
846
+
847
+ if test_choice is not None:
848
+ check_choices = test_choice
849
+ else:
850
+ check_choices = [test_target]
851
+ if self.config.doc_to_choice is not None:
852
+ for choice in check_choices:
853
+ choice_has_whitespace = choice[0].isspace()
854
+ delimiter_has_whitespace = (
+ self.config.target_delimiter.rstrip() != self.config.target_delimiter
+ )
860
+
861
+ if delimiter_has_whitespace and choice_has_whitespace:
862
+ eval_logger.debug(
863
+ f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace'
864
+ )
865
+ elif (not delimiter_has_whitespace) and (not choice_has_whitespace):
866
+ eval_logger.debug(
867
+ f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace'
868
+ )
869
+
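To make the constructor concrete, here is a hypothetical `config` dict (keys mirror `TaskConfig` fields; the values are illustrative, not a shipped task):

```python
config = {
    "task": "boolq_demo",                  # hypothetical task name
    "dataset_path": "super_glue",
    "dataset_name": "boolq",
    "output_type": "multiple_choice",
    "test_split": "validation",
    "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
    "doc_to_target": "label",
    "doc_to_choice": ["no", "yes"],
    "metric_list": [{"metric": "acc"}, {"metric": "acc_norm"}],
}
task = ConfigurableTask(config=config)
```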
870
+ def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None:
871
+ self.dataset = datasets.load_dataset(
872
+ path=self.DATASET_PATH,
873
+ name=self.DATASET_NAME,
874
+ **dataset_kwargs if dataset_kwargs is not None else {},
875
+ )
876
+
877
+ def has_training_docs(self) -> bool:
878
+ if self.config.training_split is not None:
879
+ return True
880
+ else:
881
+ return False
882
+
883
+ def has_validation_docs(self) -> bool:
884
+ if self.config.validation_split is not None:
885
+ return True
886
+ else:
887
+ return False
888
+
889
+ def has_test_docs(self) -> bool:
890
+ if self.config.test_split is not None:
891
+ return True
892
+ else:
893
+ return False
894
+
895
+ def training_docs(self) -> datasets.Dataset:
896
+ if self.has_training_docs():
897
+ if self.config.process_docs is not None:
898
+ return self.config.process_docs(
899
+ self.dataset[self.config.training_split]
900
+ )
901
+ return self.dataset[self.config.training_split]
902
+
903
+ def validation_docs(self) -> datasets.Dataset:
904
+ if self.has_validation_docs():
905
+ if self.config.process_docs is not None:
906
+ return self.config.process_docs(
907
+ self.dataset[self.config.validation_split]
908
+ )
909
+ return self.dataset[self.config.validation_split]
910
+
911
+ def test_docs(self) -> datasets.Dataset:
912
+ if self.has_test_docs():
913
+ if self.config.process_docs is not None:
914
+ return self.config.process_docs(self.dataset[self.config.test_split])
915
+ return self.dataset[self.config.test_split]
916
+
917
+ def fewshot_docs(self):
918
+ if self.config.fewshot_split is not None:
919
+ if self.config.process_docs is not None:
920
+ return self.config.process_docs(self.dataset[self.config.fewshot_split])
921
+ return self.dataset[self.config.fewshot_split]
922
+ else:
923
+ if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0):
924
+ eval_logger.warning(
925
+ f"Task '{self.config.task}': "
926
+ "num_fewshot > 0 but fewshot_split is None. "
927
+ "using preconfigured rule."
928
+ )
929
+ return super().fewshot_docs()
930
+
931
+ @utils.positional_deprecated
932
+ def fewshot_context(self, doc: str, num_fewshot: int) -> str:
933
+ """Returns a fewshot context string that is made up of a prepended description
934
+ (if provided), the `num_fewshot` number of examples, and an appended prompt example.
935
+
936
+ :param doc: str
937
+ The document as returned from training_docs, validation_docs, or test_docs.
938
+ :param num_fewshot: int
939
+ The number of fewshot examples to provide in the returned context string.
940
+ :returns: str
941
+ The fewshot context.
942
+ """
943
+ if description := self.config.description:
944
+ description = utils.apply_template(self.config.description, doc)
945
+
946
+ if num_fewshot == 0:
947
+ # always prepend the (possibly empty) task description
948
+ labeled_examples = description
949
+ else:
950
+ labeled_examples = description + self.sampler.get_context(doc, num_fewshot)
951
+
952
+ example = self.doc_to_text(doc)
953
+ if self.multiple_input:
954
+ return labeled_examples
955
+ else:
956
+ if isinstance(example, str):
957
+ return labeled_examples + example
958
+ elif isinstance(example, list):
959
+ return [labeled_examples + ex for ex in example]
960
+ elif isinstance(example, int):
961
+ if self.config.doc_to_choice is not None:
962
+ choices = self.doc_to_choice(doc)
963
+ return labeled_examples + choices[example]
964
+ else:
965
+ return labeled_examples + str(example)
966
+
967
+ def apply_filters(self):
968
+ """Iterates over FilterEnsembles and applies them to instances"""
969
+ if hasattr(self, "_filters"):
970
+ for f in self._filters:
971
+ f.apply(self._instances)
972
+ else:
973
+ eval_logger.warning("No filter defined, passing through instances")
974
+ return self._instances
975
+
976
+ def should_decontaminate(self):
977
+ return self.config.should_decontaminate
978
+
979
+ def doc_to_decontamination_query(self, doc):
980
+ if self.config.should_decontaminate:
981
+ if self.config.doc_to_decontamination_query is None:
982
+ return self.doc_to_text(doc)
983
+ else:
984
+ doc_to_decontamination_query = self.config.doc_to_decontamination_query
985
+ if doc_to_decontamination_query in self.features:
986
+ return doc[doc_to_decontamination_query]
987
+ elif callable(doc_to_decontamination_query):
988
+ return doc_to_decontamination_query(doc)
989
+ else:
990
+ return ast.literal_eval(
991
+ utils.apply_template(
992
+ self.config.doc_to_decontamination_query, doc
993
+ )
994
+ )
995
+
996
+ def _process_doc(self, doc: dict) -> dict:
997
+ """
998
+ Override this to process (detokenize, strip, replace, etc.) individual
999
+ documents. This can be used in a map over documents of a data split.
1000
+ E.g. `map(self._process_doc, self.dataset["validation"])`
1001
+
1002
+ :return: dict
1003
+ The processed version of the specified `doc`.
1004
+ """
1005
+ return doc
1006
+
1007
+ def doc_to_text(self, doc):
1008
+ if self.prompt is not None:
1009
+ doc_to_text = self.prompt
1010
+ else:
1011
+ doc_to_text = self.config.doc_to_text
1012
+
1013
+ if isinstance(doc_to_text, int):
1014
+ return doc_to_text
1015
+ elif isinstance(doc_to_text, str):
1016
+ if doc_to_text in self.features:
1017
+ # if self.config.doc_to_choice is not None:
1018
+ # return self.doc_to_choice(doc)[doc[doc_to_text]]
1019
+ # else:
1020
+ return doc[doc_to_text]
1021
+ else:
1022
+ text_string = utils.apply_template(doc_to_text, doc)
1023
+ if text_string.isdigit() and self._config.doc_to_choice is not None:
1024
+ return ast.literal_eval(text_string)
1025
+ else:
1026
+ return text_string
1027
+ elif callable(doc_to_text):
1028
+ return doc_to_text(doc)
1029
+ # Used when applying a Promptsource template
1030
+ elif hasattr(doc_to_text, "apply"):
1031
+ applied_prompt = doc_to_text.apply(doc)
1032
+ if len(applied_prompt) == 2:
1033
+ return applied_prompt[0]
1034
+ else:
1035
+ eval_logger.warning("Applied prompt returns empty string")
1036
+ return self.config.fewshot_delimiter
1037
+ else:
1038
+ print(type(doc_to_text))
1039
+ raise TypeError
1040
+
1041
+ def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
1042
+ if self.prompt is not None:
1043
+ doc_to_target = self.prompt
1044
+ else:
1045
+ doc_to_target = self.config.doc_to_target
1046
+
1047
+ if isinstance(doc_to_target, int):
1048
+ return doc_to_target
1049
+ elif isinstance(doc_to_target, str):
1050
+ if doc_to_target in self.features:
1051
+ # if self.config.doc_to_choice is not None:
1052
+ # return self.doc_to_choice(doc)[doc[doc_to_target]]
1053
+ # else:
1054
+ return doc[doc_to_target]
1055
+ else:
1056
+ target_string = utils.apply_template(doc_to_target, doc)
1057
+ if target_string.isdigit() and self._config.doc_to_choice is not None:
1058
+ return ast.literal_eval(target_string)
1059
+ elif (
1060
+ len(target_string) >= 2
1061
+ and (target_string[0] == "[")
1062
+ and (target_string[-1] == "]")
1063
+ ):
1064
+ try:
1065
+ return ast.literal_eval(target_string)
1066
+ except (SyntaxError, ValueError):
1067
+ return target_string
1068
+ else:
1069
+ return target_string
1070
+ elif isinstance(doc_to_target, list):
1071
+ return doc_to_target
1072
+ elif callable(doc_to_target):
1073
+ return doc_to_target(doc)
1074
+ # Used when applying a Promptsource template
1075
+ elif hasattr(doc_to_target, "apply"):
1076
+ applied_prompt = doc_to_target.apply(doc)
1077
+ if len(applied_prompt) == 2:
1078
+ return applied_prompt[1]
1079
+ else:
1080
+ eval_logger.warning("Applied prompt returns empty string")
1081
+ return self.config.fewshot_delimiter
1082
+ else:
1083
+ raise TypeError
1084
+
1085
+ def doc_to_choice(self, doc: Any) -> List[str]:
1086
+ if self.prompt is not None:
1087
+ doc_to_choice = self.prompt
1088
+ elif self.config.doc_to_choice is None:
1089
+ eval_logger.error("doc_to_choice was called but not set in config")
1090
+ else:
1091
+ doc_to_choice = self.config.doc_to_choice
1092
+
1093
+ if isinstance(doc_to_choice, str):
1094
+ if doc_to_choice in self.features:
1095
+ return doc[doc_to_choice]
1096
+ else:
1097
+ return ast.literal_eval(utils.apply_template(doc_to_choice, doc))
1098
+ elif isinstance(doc_to_choice, list):
1099
+ return doc_to_choice
1100
+ elif isinstance(doc_to_choice, dict):
1101
+ return list(doc_to_choice.values())
1102
+ elif callable(doc_to_choice):
1103
+ return doc_to_choice(doc)
1104
+ elif hasattr(doc_to_choice, "get_answer_choices_list"):
1105
+ return doc_to_choice.get_answer_choices_list(doc)
1106
+ else:
1107
+ raise TypeError
1108
+
1109
+ def construct_requests(
1110
+ self, doc: dict, ctx: str, **kwargs
1111
+ ) -> Union[List[Instance], Instance]:
1112
+ if self.OUTPUT_TYPE == "loglikelihood":
1113
+ arguments = (ctx, self.doc_to_target(doc))
1114
+ elif self.OUTPUT_TYPE == "loglikelihood_rolling":
1115
+ arguments = (self.doc_to_target(doc),)
1116
+ elif self.OUTPUT_TYPE == "multiple_choice":
1117
+ choices = self.doc_to_choice(doc)
1118
+ target_delimiter = self.config.target_delimiter
1119
+ if self.multiple_input:
1120
+ # If there are multiple inputs, choices are placed in the ctx
1121
+ cont = self.doc_to_target(doc)
1122
+ arguments = [
1123
+ (ctx + choice, f"{target_delimiter}{cont}") for choice in choices
1124
+ ]
1125
+ else:
1126
+ # Otherwise they are placed in the continuation
1127
+ arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
1128
+
1129
+ request_list = [
1130
+ Instance(
1131
+ request_type="loglikelihood",
1132
+ doc=doc,
1133
+ arguments=arg,
1134
+ idx=i,
1135
+ **kwargs,
1136
+ )
1137
+ for i, arg in enumerate(arguments)
1138
+ ]
1139
+ # TODO: we should raise a warning telling users this will at most ~2x runtime.
1140
+ if "acc_mutual_info" in self._metric_fn_list.keys():
1141
+ # if we are calculating multiple choice accuracy
1142
+ # using mutual information instead of raw loglikelihood as metric, need unconditional lls.
1143
+
1144
+ # here mutual info refers to calculating
1145
+ # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice))
1146
+ # in other words normalizing by subtracting the unconditional logprob of each choice.
1147
+ request_list.extend(
1148
+ [
1149
+ Instance(
1150
+ request_type="loglikelihood",
1151
+ doc=doc,
1152
+ arguments=("", "{}".format(choice)),
1153
+ idx=i,
1154
+ **kwargs,
1155
+ )
1156
+ for i, choice in enumerate(choices)
1157
+ ]
1158
+ )
1159
+ return request_list
1160
+
1161
+ elif self.OUTPUT_TYPE == "generate_until":
1162
+ arguments = (ctx, deepcopy(self.config.generation_kwargs))
1163
+
1164
+ return Instance(
1165
+ request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs
1166
+ )
1167
+
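For a multiple_choice doc the arguments reduce to (context, continuation) pairs, plus empty-context pairs when `acc_mutual_info` is requested; schematically:

```python
ctx, choices, delim = "Q: Is the sky blue?\nA:", ["no", "yes"], " "
conditional   = [(ctx, f"{delim}{c}") for c in choices]  # scores log P(choice | ctx)
unconditional = [("", c) for c in choices]               # log P(choice), mutual-info only
```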
1168
+ def process_results(self, doc, results):
1169
+ if callable(self.config.process_results):
1170
+ return self.config.process_results(doc, results)
1171
+
1172
+ result_dict = {}
1173
+ use_metric = list(self._metric_fn_list.keys())
1174
+ if self.OUTPUT_TYPE == "loglikelihood":
1175
+ results = results[0]
1176
+ ll, is_greedy = results
1177
+ return {
1178
+ **({"perplexity": ll} if "perplexity" in use_metric else {}),
1179
+ **({"acc": int(is_greedy)} if "acc" in use_metric else {}),
1180
+ }
1181
+ elif self.OUTPUT_TYPE == "loglikelihood_rolling":
1182
+ (loglikelihood,) = results
1183
+ _words = self.count_words(self.doc_to_target(doc))
1184
+ _bytes = self.count_bytes(self.doc_to_target(doc))
1185
+ return {
1186
+ **(
1187
+ {"word_perplexity": (loglikelihood, _words)}
1188
+ if "word_perplexity" in use_metric
1189
+ else {}
1190
+ ),
1191
+ **(
1192
+ {"byte_perplexity": (loglikelihood, _bytes)}
1193
+ if "byte_perplexity" in use_metric
1194
+ else {}
1195
+ ),
1196
+ **(
1197
+ {"bits_per_byte": (loglikelihood, _bytes)}
1198
+ if "bits_per_byte" in use_metric
1199
+ else {}
1200
+ ),
1201
+ }
1202
+ elif self.OUTPUT_TYPE == "multiple_choice":
1203
+ lls, is_greedy = zip(*results)
1204
+
1205
+ # retrieve choices in List[str] form, to compute choice lengths, etc.
1206
+ choices = self.doc_to_choice(doc)
1207
+ completion_len = np.array([float(len(i)) for i in choices])
1208
+
1209
+ if (
1210
+ 2 * len(choices) == len(lls)
1211
+ and "acc_mutual_info" in self._metric_fn_list.keys()
1212
+ ):
1213
+ # then we are doing mutual info.
1214
+ # this stores the "dryrun" / unconditional answer loglikelihoods
1215
+ lls_unconditional = lls[1::2]
1216
+ if len(lls_unconditional) != len(choices):
1217
+ raise ValueError
1218
+ # and this stores our "regular" conditional loglikelihoods
1219
+ lls = lls[::2]
1220
+
1221
+ pred = np.argmax(lls)
1222
+ pred_norm = np.argmax(lls / completion_len)
1223
+
1224
+ if self.multiple_input:
1225
+ gold = self.doc_to_text(doc)
1226
+ else:
1227
+ gold = self.doc_to_target(doc)
1228
+
1229
+ gold_index_error = False
1230
+ if isinstance(gold, list):
1231
+ gold = [i if i < len(choices) else -100 for i in gold]
1232
+ if -100 in gold:
1233
+ gold_index_error = True
1234
+ else:
1235
+ if isinstance(gold, int):
1236
+ gold = gold if gold < len(choices) else -100
1237
+ elif isinstance(gold, str):
1238
+ gold = choices.index(gold) if gold in choices else -100
1239
+
1240
+ if gold == -100:
1241
+ gold_index_error = True
1242
+
1243
+ if gold_index_error:
1244
+ eval_logger.warning(
1245
+ "Label index was not within the range of available choices. "
1246
+ f"Sample:\n\n{doc}\n\n"
1247
+ )
1248
+
1249
+ if self.multiple_target:
1250
+ acc = 1.0 if pred in gold else 0.0
1251
+ acc_norm = 1.0 if pred_norm in gold else 0.0
1252
+ exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold]))
1253
+ else:
1254
+ acc = 1.0 if pred == gold else 0.0
1255
+ acc_norm = 1.0 if pred_norm == gold else 0.0
1256
+ # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly
1257
+ exact_match = int(is_greedy[gold]) if gold != -100 else 0
1258
+
1259
+ prob_norm = utils.softmax(lls)
1260
+
1261
+ # TODO use keyword arguments to the metric?
1262
+ # gold, pred, norm stuff, the original lls,
1263
+ result_dict = {
1264
+ **({"acc": acc} if "acc" in use_metric else {}),
1265
+ **({"f1": (gold, pred)} if "f1" in use_metric else {}),
1266
+ **({"mcc": (gold, pred)} if "mcc" in use_metric else {}),
1267
+ **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}),
1268
+ **({"exact_match": exact_match} if "exact_match" in use_metric else {}),
1269
+ **(
1270
+ {"brier_score": (gold, prob_norm)}
1271
+ if "brier_score" in use_metric
1272
+ else {}
1273
+ ),
1274
+ }
1275
+
1276
+ if "acc_mutual_info" in use_metric:
1277
+ lls_mutual_info = [
1278
+ ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional)
1279
+ ]
1280
+ acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
1281
+ result_dict["acc_mutual_info"] = acc_mutual_info
1282
+
1283
+ elif self.OUTPUT_TYPE == "generate_until":
1284
+ gold = self.doc_to_target(doc)
1285
+ result = results[0]
1286
+ if self.config.doc_to_choice is not None:
1287
+ # If you set doc_to_choice,
1288
+ # it assumes that doc_to_target returns a number.
1289
+ choices = self.doc_to_choice(doc)
1290
+ gold = choices[gold]
1291
+ # we expect multiple_targets to be a list.
1292
+ elif self.multiple_target:
1293
+ gold = list(gold)
1294
+ elif type(gold) != type(result):
1295
+ # cast gold to the same type as result
1296
+ gold = type(result)(gold)
1297
+
1298
+ for metric in self._metric_fn_list.keys():
1299
+ if self.multiple_target:
1300
+ # in the case where we have multiple targets,
1301
+ # return true if any are true
1302
+ # TODO: this may break for multiple_target, non zero-or-1 metrics
1303
+ scores = []
1304
+ if not isinstance(gold, list):
1305
+ # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
1306
+ # print(gold)
1307
+ gold = [gold]
1308
+ if metric == "exact_match":
1309
+ result = [result for _ in range(len(gold))]
1310
+ scores = self._metric_fn_list[metric](
1311
+ references=gold,
1312
+ predictions=result,
1313
+ **self._metric_fn_kwargs[metric],
1314
+ )[metric]
1315
+ result_score = 1.0 if scores > 0.0 else 0.0
1316
+ else:
1317
+ for gold_option in gold:
1318
+ try:
1319
+ result_score = self._metric_fn_list[metric](
1320
+ references=[gold_option],
1321
+ predictions=[result],
1322
+ **self._metric_fn_kwargs[metric],
1323
+ )
1324
+ except (
1325
+ TypeError
1326
+ ): # TODO: this is hacky and I don't want to do it
1327
+ result_score = self._metric_fn_list[metric](
1328
+ [gold_option, result]
1329
+ )
1330
+ if isinstance(result_score, dict):
1331
+ # TODO: this handles the case where HF evaluate returns a dict.
1332
+ result_score = result_score[metric]
1333
+ scores.append(result_score)
1334
+ if any(scores):
1335
+ result_score = 1.0
1336
+ else:
1337
+ result_score = 0.0
1338
+ else:
1339
+ try:
1340
+ result_score = self._metric_fn_list[metric](
1341
+ references=[gold],
1342
+ predictions=[result],
1343
+ **self._metric_fn_kwargs[metric],
1344
+ )
1345
+ except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
1346
+ result_score = self._metric_fn_list[metric]([gold, result])
1347
+ if isinstance(result_score, dict):
1348
+ # TODO: this handles the case where HF evaluate returns a dict.
1349
+ result_score = result_score[metric]
1350
+ result_dict[metric] = result_score
1351
+ else:
1352
+ raise ValueError(
1353
+ f"Passed invalid output_type '{self.OUTPUT_TYPE}'! Please use one of "
1354
+ "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'"
1355
+ )
1356
+
1357
+ return result_dict
1358
+
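The multiple_choice scoring above boils down to two argmaxes, one over raw loglikelihoods and one over length-normalized ones; a self-contained sketch with invented numbers:

```python
import numpy as np

lls = np.array([-12.3, -9.8, -11.1])                  # per-choice loglikelihoods
choices = ["apple", "banana", "fig"]
completion_len = np.array([float(len(c)) for c in choices])
pred = np.argmax(lls)                                 # drives `acc`
pred_norm = np.argmax(lls / completion_len)           # drives `acc_norm`
```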
1359
+ def aggregation(self) -> dict:
1360
+ return self._aggregation_list
1361
+
1362
+ def higher_is_better(self) -> dict:
1363
+ return self._higher_is_better
1364
+
1365
+ def get_config(self, key: str) -> Any:
1366
+ return getattr(self._config, key, None)
1367
+
1368
+ def __repr__(self):
1369
+ return (
1370
+ f"ConfigurableTask(task_name={getattr(self.config, 'task', None)},"
1371
+ f"group_name={getattr(self.config, 'group', None)},"
1372
+ f"output_type={self.OUTPUT_TYPE},"
1373
+ f"num_fewshot={getattr(self.config, 'num_fewshot', None)},"
1374
+ f"num_samples={len(self.eval_docs)})"
1375
+ )
1376
+
1377
+
1378
+ class MultipleChoiceTask(Task):
1379
+ OUTPUT_TYPE = "loglikelihood"
1380
+
1381
+ def doc_to_target(self, doc: dict) -> str:
1382
+ return " " + doc["choices"][doc["gold"]]
1383
+
1384
+ def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]:
1385
+ # TODO: add mutual info here?
1386
+ return [
1387
+ Instance(
1388
+ request_type="loglikelihood",
1389
+ doc=doc,
1390
+ arguments=(ctx, " {}".format(choice)),
1391
+ idx=i,
1392
+ **kwargs,
1393
+ )
1394
+ for i, choice in enumerate(doc["choices"])
1395
+ ]
1396
+
1397
+ def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict:
1398
+ results = [
1399
+ res[0] for res in results
1400
+ ] # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere?
1401
+ gold = doc["gold"]
1402
+
1403
+ acc = 1.0 if np.argmax(results) == gold else 0.0
1404
+ completion_len = np.array([float(len(i)) for i in doc["choices"]])
1405
+ acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0
1406
+
1407
+ return {
1408
+ "acc": acc,
1409
+ "acc_norm": acc_norm,
1410
+ }
1411
+
1412
+ def higher_is_better(self) -> dict:
1413
+ return {
1414
+ "acc": True,
1415
+ "acc_norm": True,
1416
+ }
1417
+
1418
+ def aggregation(self) -> dict:
1419
+ return {
1420
+ "acc": mean,
1421
+ "acc_norm": mean,
1422
+ }
1423
+
1424
+
1425
+ class PerplexityTask(Task):
1426
+ OUTPUT_TYPE = "loglikelihood_rolling"
1427
+
1428
+ def has_training_docs(self) -> bool:
1429
+ return False
1430
+
1431
+ def fewshot_examples(self, k: int, rnd) -> List:
1432
+ if k != 0:
1433
+ raise ValueError(
1434
+ "The number of fewshot examples must be 0 for perplexity tasks."
1435
+ )
1436
+ return []
1437
+
1438
+ def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]:
1439
+ if num_fewshot != 0:
1440
+ raise ValueError(
1441
+ "The number of fewshot examples must be 0 for perplexity tasks."
1442
+ )
1443
+
1444
+ return ""
1445
+
1446
+ def higher_is_better(self) -> dict:
1447
+ return {
1448
+ "word_perplexity": False,
1449
+ "byte_perplexity": False,
1450
+ "bits_per_byte": False,
1451
+ }
1452
+
1453
+ def doc_to_decontamination_query(self, doc):
1454
+ return doc
1455
+
1456
+ def doc_to_text(self, doc) -> str:
1457
+ return ""
1458
+
1459
+ def doc_to_target(self, doc):
1460
+ return doc
1461
+
1462
+ def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs):
1463
+ if ctx:
1464
+ raise ValueError("PerplexityTask requests expect an empty context.")
1465
+
1466
+ return Instance(
1467
+ request_type=self.OUTPUT_TYPE,
1468
+ doc=doc,
1469
+ arguments=(self.doc_to_target(doc),),
1470
+ idx=0,
1471
+ **kwargs,
1472
+ )
1473
+
1474
+ def process_results(self, doc: dict, results: Tuple[float]) -> dict:
1475
+ (loglikelihood,) = results
1476
+ words = self.count_words(self.doc_to_target(doc))
1477
+ bytes_ = self.count_bytes(self.doc_to_target(doc))
1478
+ return {
1479
+ "word_perplexity": (loglikelihood, words),
1480
+ "byte_perplexity": (loglikelihood, bytes_),
1481
+ "bits_per_byte": (loglikelihood, bytes_),
1482
+ }
1483
+
1484
+ def aggregation(self) -> dict:
1485
+ return {
1486
+ "word_perplexity": weighted_perplexity,
1487
+ "byte_perplexity": weighted_perplexity,
1488
+ "bits_per_byte": bits_per_byte,
1489
+ }
1490
+
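The aggregations consume the (loglikelihood, n_units) pairs emitted by `process_results`; assuming the usual definitions, weighted perplexity is `exp(-Σll / Σunits)` and bits-per-byte divides by ln 2:

```python
import math

pairs = [(-120.0, 100), (-60.0, 50)]         # invented (loglikelihood, n_units) pairs
total_ll = sum(ll for ll, _ in pairs)
total_units = sum(n for _, n in pairs)
word_ppl = math.exp(-total_ll / total_units)             # exp(1.2) ≈ 3.32
bits_per_byte = -total_ll / (total_units * math.log(2))
```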
1491
+ @classmethod
1492
+ def count_bytes(cls, doc) -> int:
1493
+ return len(doc.encode("utf-8"))
1494
+
1495
+ @classmethod
1496
+ def count_words(cls, doc) -> int:
1497
+ """Downstream tasks with custom word boundaries should override this!"""
1498
+ return len(re.split(r"\s+", doc))
lm-evaluation/build/lib/lm_eval/decontamination/decontaminate.py ADDED
@@ -0,0 +1,166 @@
1
+ import collections
2
+ import glob
3
+ import json
4
+ import os
5
+ import pickle
6
+ import random
7
+ import time
8
+
9
+ from .archiver import ZStdTextReader
10
+ from .janitor import Janitor, word_ngrams
11
+
12
+
13
+ # Was used for testing the evaluator decoupled from the full logic below
14
+ def get_train_overlap_stub(docs: dict, ngrams_path: str, ngrams_n_size: int):
15
+ simulated_overlap = 0.1
16
+ contaminated = int(len(docs) * simulated_overlap)
17
+ return random.sample(range(len(docs)), contaminated)
18
+
19
+
20
+ # Returns a dictionary containing all overlapping documents in each
21
+ # task. In the standard use case, an overlap occurs when any of the 13-grams
22
+ # found in the task document exist in the training set documents.
23
+ #
24
+ # To generate 13-grams for the pile see scripts/clean_training_data. The final output of these
25
+ # scripts are an info.json file containing the n_gram_size (13) and a bunch of "ngrams_{x}.bkt.txt.sorted.zst"
26
+ # files. These should exist in the "ngrams_path" provided to this function.
27
+
28
+
29
+ # Algorithm:
30
+ # 1. Build lookups for each dataset {ngram: list(document_ids)}
31
+ # 2. Merge into an overall lookup {ngram: [(task_name, task_set, doc_ids),]}
32
+ # 3. Full scan the 13-grams from the training set against the merged lookup,
33
+ # saving matches in the "duplicates" dictionary {(task_name, task_set): set(doc_ids)}
34
+ # 4. Strip the task_set from the dictionary keys and return
35
+ #
36
+ # We cache the task+set lookups as well as the overlaps.
37
+ def get_train_overlap(docs_by_task_set: dict, ngrams_path: str, limit: int) -> dict:
38
+ # return get_train_overlap_stub(docs, ngrams_path, ngrams_n_size)
39
+
40
+ info_dict_path = os.path.join(ngrams_path, "info.json")
41
+ info_dict = json.load(open(info_dict_path, "r", encoding="utf-8"))
42
+ ngrams_n_size = info_dict["ngram_size"]
43
+
44
+ janitor = Janitor()
45
+
46
+ # Build lookup for each dataset first in case we use different task combinations later
47
+ print("Building Lookups...")
48
+ start = time.perf_counter()
49
+
50
+ def get_overlaps_dump_path(task_name, task_set, ngrams_n_size, limit) -> str:
51
+ return f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.overlaps"
52
+
53
+ lookups = {}
54
+ duplicates = {} # (task_name, task_set): set(doc_ids)}
55
+ sets_to_decontaminate = len(docs_by_task_set.keys())
56
+
57
+ for (task_name, task_set), docs in docs_by_task_set.items():
58
+ if not os.path.exists(f"data/{task_name}"):
59
+ os.mkdir(f"data/{task_name}")
60
+
61
+ # Check if we've decontaminated this combination before
62
+ overlaps_dump_path = get_overlaps_dump_path(
63
+ task_name, task_set, ngrams_n_size, limit
64
+ )
65
+ if os.path.exists(overlaps_dump_path):
66
+ duplicates[(task_name, task_set)] = pickle.load(
67
+ open(overlaps_dump_path, "rb")
68
+ )
69
+ sets_to_decontaminate -= 1
70
+ continue
71
+ else:
72
+ duplicates[(task_name, task_set)] = set()
73
+
74
+ # Build/load the task lookup {ngram: set(documents)}.
75
+ task_set_lookup_path = (
76
+ f"data/{task_name}/{task_set}_{ngrams_n_size}grams_limit{limit}.lookup"
77
+ )
78
+ if os.path.exists(task_set_lookup_path):
79
+ print(f"{task_set_lookup_path} available, loading...")
80
+ lookups[(task_name, task_set)] = pickle.load(
81
+ open(task_set_lookup_path, "rb")
82
+ )
83
+ else:
84
+ print(f"{task_set_lookup_path} not available, building...")
85
+ lookup = collections.defaultdict(set)
86
+
87
+ for doc_id, document in enumerate(docs):
88
+ ngrams = word_ngrams(janitor.normalize_string(document), ngrams_n_size)
89
+ for ngram in ngrams:
90
+ lookup[ngram].add(doc_id)
91
+
92
+ pickle.dump(lookup, open(task_set_lookup_path, "wb"))
93
+ lookups[(task_name, task_set)] = lookup
94
+
95
+ elapsed = time.perf_counter() - start
96
+ print(f"Building lookups took {elapsed:0.5f} seconds.")
97
+
98
+ matched_ngrams = []
99
+
100
+ if sets_to_decontaminate > 0:
101
+ print("Merging lookups...")
102
+ start = time.perf_counter()
103
+ merged_lookup = collections.defaultdict(list)
104
+ for (task_name, task_set), lookup in lookups.items():
105
+ for ngram, doc_ids in lookup.items():
106
+ merged_lookup[ngram].append((task_name, task_set, doc_ids))
107
+
108
+ elapsed = time.perf_counter() - start
109
+ print(f"Merging lookups took {elapsed:0.5f} seconds.")
110
+
111
+ print(f"{ngrams_n_size} grams files found in {ngrams_path}:")
112
+ files = glob.glob(os.path.join(ngrams_path, "*.sorted.zst"))
113
+ print(files)
114
+
115
+ for file in files:
116
+ start = time.perf_counter()
117
+ print(f"Scanning {file}")
118
+ reader = ZStdTextReader(file)
119
+ total_ngrams = 0
120
+ unique_ngrams = 0
121
+ matching_unique = 0
122
+ non_matching_unique = 0
123
+
124
+ current_ngram = ""
125
+ for line in reader.read_tqdm(): # Scan training set ngrams file
126
+ total_ngrams += 1
127
+ [ngram, document_id] = line.rsplit(" ", 1)
128
+ if (
129
+ ngram != current_ngram
130
+ ): # Only need to match the ngram once in training set
131
+ unique_ngrams += 1
132
+ current_ngram = ngram
133
+ if ngram in merged_lookup:
134
+ matched_ngrams.append(ngram) # For logging
135
+ matching_unique += 1
136
+ for task_name, task_set, doc_ids in merged_lookup[ngram]:
137
+ task_doc_set = duplicates[(task_name, task_set)]
138
+ for doc_id in doc_ids: # Record contamination across all relevant task/set combos
139
+ task_doc_set.add(doc_id)
140
+ del merged_lookup[ngram] # No point matching again
141
+ else:
142
+ non_matching_unique += 1
143
+
144
+ print(f"Total Ngrams: {total_ngrams}")
145
+ print(f"Unique Ngrams: {unique_ngrams}")
146
+ print(f"Unique Matching: {matching_unique}")
147
+ print(f"Unique Non Matching: {non_matching_unique}")
148
+ print("Matched ngrams:")
149
+ for ngram in matched_ngrams:
150
+ print(ngram)
151
+
152
+ elapsed = time.perf_counter() - start
153
+ print(f"Read took {elapsed:0.5f} seconds.")
154
+ print(f"Speed: {(os.path.getsize(file)/1000000.0)/elapsed}MB/second")
155
+
156
+ print(duplicates)
157
+
158
+ # Dump overlaps separately
159
+ for (task_name, task_set), doc_ids in duplicates.items():
160
+ overlaps_dump_path = get_overlaps_dump_path(
161
+ task_name, task_set, ngrams_n_size, limit
162
+ )
163
+ pickle.dump(doc_ids, open(overlaps_dump_path, "wb"))
164
+
165
+ # Strip task set and return
166
+ return {task_name: doc_ids for (task_name, task_set), doc_ids in duplicates.items()}
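Step 1 of the algorithm in isolation: a per-task inverted index from each normalized n-gram to the documents containing it, the same shape as the cached `.lookup` pickles. A minimal sketch:

```python
import collections

def build_lookup(docs, janitor, n):
    lookup = collections.defaultdict(set)  # {ngram: set(doc_ids)}
    for doc_id, document in enumerate(docs):
        for ngram in word_ngrams(janitor.normalize_string(document), n):
            lookup[ngram].add(doc_id)
    return lookup
```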
lm-evaluation/build/lib/lm_eval/decontamination/janitor.py ADDED
@@ -0,0 +1,328 @@
1
+ import pickle
2
+ import re
3
+ import string
4
+ import traceback
5
+ from typing import Iterator, List, Sequence, Tuple, TypeVar
6
+
7
+
8
+ # This is a cpp module. Compile janitor_util.cpp with:
9
+ # c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) janitor_util.cpp -o janitor_util$(python3-config --extension-suffix) -undefined dynamic_lookup
10
+ try:
11
+ import janitor_util
12
+
13
+ JANITOR_CPP = True
14
+ except Exception:
15
+ print("WARNING: C++ module could not be loaded. Janitor running in python mode")
16
+ traceback.print_exc()
17
+ JANITOR_CPP = False
18
+
19
+ T = TypeVar("T")
20
+
21
+
22
+ # Implementation from nltk source
23
+ # https://www.nltk.org/_modules/nltk/util.html
24
+ def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[Tuple[T, ...]]:
25
+ history = []
26
+ while n > 1:
27
+ # PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator
28
+ try:
29
+ next_item = next(sequence)
30
+ except StopIteration:
31
+ # no more data, terminate the generator
32
+ return
33
+ history.append(next_item)
34
+ n -= 1
35
+ for item in sequence:
36
+ history.append(item)
37
+ yield tuple(history)
38
+ del history[0]
39
+
40
+
41
+ def word_ngrams(s: str, n: int) -> Iterator[str]:
42
+ """Splits a string into ngram words"""
43
+ tokens = s.split() # not a generator :(
44
+ ngram_seqs = form_ngrams(iter(tokens), n)
45
+ return (" ".join(ngram) for ngram in ngram_seqs)
46
+
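The sliding-window behaviour of these two generators, in a doctest-style check:

```python
>>> list(form_ngrams(iter("abcd"), 2))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(word_ngrams("the cat sat on", 3))
['the cat sat', 'cat sat on']
```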
47
+
48
+ # Does character sequences only - combined faster function to play around with later
49
+ # def word_ngrams_indices_combined(sequence, n):
50
+ # current_word = ""
51
+ # history = []
52
+ # gap = False;
53
+ # start = 0
54
+ # end = 0
55
+ # for character in sequence:
56
+ # if character == " ":
57
+ # if not gap:
58
+ # gap = True
59
+ # history.append(current_word)
60
+ # end += len(current_word) - 1
61
+ # current_word = ""
62
+ # if len(history) == n:
63
+ # yield (tuple(history), start, end)
64
+ # del history[0]
65
+ # start = end + 1
66
+ # end = start
67
+ # else:
68
+ # gap = False
69
+ # current_word += character
70
+
71
+
72
+ # https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python
73
+ def split_indices(s: str) -> Iterator[Tuple[str, Tuple[int, int]]]:
74
+ """Splits a string on whitespaces and records the indices of each in the original string.
75
+ :return: generator((word, (start_idx, end_idx)), ...)
76
+ """
77
+ return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r"\S+", s))
78
+
79
+
80
+ def word_ngrams_indices(s: str, n: int) -> Iterator[Tuple[str, Tuple[int, int]]]:
81
+ """Splits a string into pairs of (ngram words, their start/end indices)"""
82
+ tokens_with_indices = split_indices(s)
83
+
84
+ # Generator of ngrams of (word, idx_pairs)
85
+ # (
86
+ # [(word, (start,end)), (word, (start, end))...],
87
+ # [(word, (start, end)), ...],
88
+ # ...
89
+ # )
90
+ ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n)
91
+
92
+ # Generator of pairs of word and index ngrams
93
+ # (
94
+ # ([word, word, ...], [(start,end), (start,end), ...]),
95
+ # ...
96
+ # )
97
+ ngram_indices_pairs = (
98
+ zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices
99
+ )
100
+
101
+ # Generator of ( (word_ngram, (start, end)), (word_ngram, start, end)), ...)
102
+ return (
103
+ (" ".join(ngram_seq), (indices[0][0], indices[-1][1]))
104
+ for ngram_seq, indices in ngram_indices_pairs
105
+ )
106
+
107
+
108
+ class Janitor:
109
+ # FIXME delete_chars: Should anything else go here? Special chars?
110
+ def __init__(
111
+ self,
112
+ ngram_n: int = 13,
113
+ window_to_remove: int = 200,
114
+ too_dirty_cutoff: int = 10,
115
+ minimum_slice_length: int = 200,
116
+ delete_chars: str = string.punctuation,
117
+ ) -> None:
118
+ self.ngram_n = ngram_n
119
+ self.window_to_remove = window_to_remove
120
+ self.too_dirty_cutoff = too_dirty_cutoff
121
+ self.minimum_slice_length = minimum_slice_length
122
+ self.delete_chars = delete_chars
123
+
124
+ self.dirt_ngrams = set()
125
+
126
+ # If in python, we'll translate uppercase to lowercase and delete naughty characters.
127
+ # This is fast by python standards
128
+ # https://stackoverflow.com/questions/638893/what-is-the-most-efficient-way-in-python-to-convert-a-string-to-all-lowercase-st
129
+ self.translation_table = str.maketrans(
130
+ string.ascii_lowercase + string.ascii_uppercase, # These characters
131
+ string.ascii_lowercase * 2, # Become these characters
132
+ self.delete_chars, # These are deleted
133
+ )
134
+
135
+ ##############
136
+ # I/O for saving contamination ngrams
137
+ ##############
138
+
139
+ def save_contamination_ngrams(self, filename: str) -> None:
140
+ with open(filename, "wb") as fp:
141
+ pickle.dump(filename, fp)
142
+
+     def load_contamination_ngrams(self, filename: str) -> None:
+         with open(filename, "rb") as fp:
+             self.dirt_ngrams = pickle.load(fp)
+
+     ##############
+     # Call these :)
+     ##############
+
+     def register_contaminant(self, dirt_string: str) -> None:
+         """Register a string as contamination to be removed, e.g. a test set.
+         This breaks the dirt_string into ngrams to store for future cleaning."""
+         if JANITOR_CPP:
+             return self.register_contaminant_cpp(dirt_string)
+         else:
+             print("WARNING: Janitor running in python mode")
+             return self.register_contaminant_python(dirt_string)
+
+     def clean(self, dirty_string: str) -> List[str]:
+         """Clean a string (e.g. a training set) by removing all ngrams previously
+         registered as contaminants. Returns a list of clean chunks, or an empty list
+         if the string was too dirty."""
+         if JANITOR_CPP:
+             return self.clean_cpp(dirty_string)
+         else:
+             print("WARNING: Janitor running in python mode")
+             return self.clean_python(dirty_string)
+
+     def _split_chunks(
+         self, dirty_string: str, dirty_parts: Sequence[Tuple]
+     ) -> List[str]:
+         clean_chunks = []
+         splice_idx = 0
+         end = -1
+         for i, (ngram, start, end) in enumerate(dirty_parts):
+             if i >= self.too_dirty_cutoff:
+                 return []
+             start = max(0, start - self.window_to_remove)
+             end = min(len(dirty_string), end + self.window_to_remove)
+
+             if start - splice_idx > self.minimum_slice_length:
+                 clean_chunks.append(dirty_string[splice_idx:start])
+             splice_idx = end
+
+         if end < len(dirty_string) - self.minimum_slice_length:
+             clean_chunks.append(dirty_string[end + 1 :])
+
+         return clean_chunks
+
+     ##############
+     # Fast C++
+     ##############
+
+     def register_contaminant_cpp(self, dirt_string) -> None:
+         self.dirt_ngrams.update(
+             janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n)
+         )
+
+     def clean_cpp(self, dirty_string: str) -> List[str]:
+         contamination_indices = janitor_util.clean_ngram_with_indices(
+             dirty_string, self.delete_chars, self.ngram_n
+         )
+         return self._split_chunks(dirty_string, contamination_indices)
+
+     ##############
+     # Slow python
+     ##############
+
+     def normalize_string(self, s: str) -> str:
+         return s.translate(self.translation_table)
+
+     def register_contaminant_python(self, dirt_string: str) -> None:
+         self.dirt_ngrams.update(
+             word_ngrams(self.normalize_string(dirt_string), self.ngram_n)
+         )
+
+     def clean_python(self, dirty_string: str) -> List[str]:
+         contamination_indices = (
+             (None, *idx_pair)
+             for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n)
+             if self.normalize_string(dirty_ngram) in self.dirt_ngrams
+         )
+         return self._split_chunks(dirty_string, contamination_indices)
+
+
+ ##################################################################
+ # Tests
+ ##################################################################
+
+ # def print_cpp():
+ #     source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
+
+ #     for i in range(1, 10, 2):
+ #         pprint(janitor_util.clean_ngram(source, string.punctuation, i))
+ #         for ngram, start, end in \
+ #                 janitor_util.clean_ngram_with_indices(source, string.punctuation, i):
+ #             print(ngram, "\t", start, end, source[start:end].replace("\n", "\\n"))
+
+
+ # def test_cpp():
+ #     source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
+ #     contaminant = "dirty boy. Clean he he"
+
+ #     jan_python = Janitor()
+ #     jan_cpp = Janitor()
+
+ #     jan_python.register_contaminant_python(contaminant)
+ #     jan_cpp.register_contaminant(contaminant)
+
+ #     assert jan_python.dirt_ngrams == jan_cpp.dirt_ngrams, (jan_python.dirt_ngrams, jan_cpp.dirt_ngrams)
+
+ #     assert jan_python.clean_python(source) == jan_cpp.clean(source), \
+ #         (jan_python.clean_python(source), jan_cpp.clean(source))
+
+ #     print("Passed test, python==cpp")
+
+
+ # def benchmark():
+ #     # Download and put in data folder: enwik8 (100 MB) from https://cs.fit.edu/~mmahoney/compression/textdata.html
+ #     setup = \
+ #     """
+ #     with open("data/enwik8", "r") as f:
+ #         data = f.read()
+ #     jan = Janitor(too_dirty_cutoff=1000)
+ #     jan.register_contaminant('''
+ #     theories is that there is a connection between &quot;geekdom&quot; and autism.
+ #     This is hinted, for instance, by a ''Wired Magazine'' article in 2001 entitled &quot;
+ #     The [[Geek]] Syndrome&quot;, which is a point argued by many in the autism rights
+ #     movement{{ref|Wired}}. This article, many professionals assert, is just one example of
+ #     the media's application of mental disease labels to what is actually variant normal behavior
+ #     &amp;mdash;they argue that shyness, lack of athletic ability or social skills, and intellectual
+ #     interests, even when they seem unusual to others, are not in themselves signs of autism or
+ #     Asperger's syndrome. Others assert that it is actually the medical profession which is applying
+ #     mental disease labels to children who in the past would have simply been accepted as a little
+ #     different or even labeled 'gifted'. See [[clinomorphism]] for further discussion of this issue.
+ #     Due to the recent publicity surrounding autism and autis
+ #     ultan Al Nahyan]] granted [[Petroleum]] concessions, and oil was first found in 1958. At first,
+ #     oil money had a marginal impact. A few lowrise concete buildings were erected, and the first
+ #     paved road was completed in 1961, but Sheikh Shakbut, uncertain whether the new oil royalties
+ #     would last, took a cautious approach, preferring to save the revenue rather than investing it in
+ #     development. His brother, [[Zayed bin Sultan Al Nahayan]], saw that oil wealth had the potential
+ #     to transform Abu Dhabi. The ruling Al Nahayan family decided that Sheikh Zayed should replace his
+ #     brother as Ruler and carry out his vision of developing the country. On [[August 6]], [[1966]],
+ #     with the assistance of the British, Sheikh Zayed became the new ruler. See generally, Al-Fahim, M,
+ #     ''From Rags to Riches: A Story of Abu Dhabi'', Chapter Six (London Centre of Arab Studies, 1995),
+ #     ISBN 1 900404 00 1. With the announcement by Britain in 1968 that it would withdraw from the
+ #     Gulf area by 1971, Sheikh Zayed became the main driving force behind the formation of the
+ #     [[United Arab Emirates]]. After the Emirates gained independence in 1971,
+ #     ''')
+ #     """
+
+ #     n = 1
+ #     print(f"Timing {n} run on 100 MB")
+ #     print("Register contaminant")
+ #     # print("\tPython", timeit.timeit("jan.register_contaminant_python(data)", setup=setup, globals=globals(), number=n))
+ #     print("\tCpp", timeit.timeit("jan.register_contaminant(data)", setup=setup, globals=globals(), number=n))
+
+ #     print("Clean")
+ #     # print("\tPython", timeit.timeit("jan.clean_python(data)", setup=setup, globals=globals(), number=n))
+ #     print("\tCpp", timeit.timeit("jan.clean(data)", setup=setup, globals=globals(), number=n))
+
+
+ # def test_janitor_general():
+ #     source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
+ #     contaminant = "dirty boy. Clean he he"
+
+ #     jan = Janitor(ngram_n=3)
+ #     jan.register_contaminant(contaminant)
+ #     cleaned = " ".join(jan.clean(source))
+ #     for contam in jan.dirt_ngrams:
+ #         assert contam not in cleaned, contam
+
+ #     filename = "data/saved_contam"
+ #     jan.save_contamination_ngrams(filename)
+
+ #     jan = Janitor(ngram_n=3)
+ #     jan.load_contamination_ngrams(filename)
+ #     cleaned = " ".join(jan.clean(source))
+ #     for contam in jan.dirt_ngrams:
+ #         assert contam not in cleaned, contam
+
+ # if __name__ == "__main__":
+ #     test_janitor_general()
+ #     # print_cpp()
+ #     # test_cpp()
+ #     # benchmark()
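
A minimal usage sketch of the Janitor class above (illustrative only, not part of the diff; the window and slice thresholds are lowered from the defaults so a short string yields visible chunks):

    jan = Janitor(ngram_n=3, window_to_remove=10, minimum_slice_length=5)
    jan.register_contaminant_python("dirty boy. Clean me daddy")
    # clean_python removes a window around every registered ngram hit and
    # returns [] when more than `too_dirty_cutoff` hits are found.
    chunks = jan.clean_python("some training text ... dirty boy. Clean me daddy ... trailing text")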
lm-evaluation/build/lib/lm_eval/filters/__init__.py ADDED
@@ -0,0 +1,48 @@
+ from functools import partial
+ from typing import List, Union
+
+ from lm_eval.api.filter import FilterEnsemble
+
+ from . import extraction, selection, transformation
+
+
+ FILTER_REGISTRY = {
+     "take_first": selection.TakeFirstFilter,
+     "regex": extraction.RegexFilter,
+     "majority_vote": selection.MajorityVoteFilter,
+     "take_first_k": selection.TakeKFilter,
+     "remove_whitespace": extraction.WhitespaceFilter,
+     "lowercase": transformation.LowercaseFilter,
+     "uppercase": transformation.UppercaseFilter,
+     "map": transformation.MapFilter,
+     "multi_choice_regex": extraction.MultiChoiceRegexFilter,
+     # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function
+     # that takes an input and returns a scalar and then should select the max reward,
+     # or should implement different filters for different ways of handling a reward model's inference.
+     # "arg_max": selection.ArgMaxFilter,
+ }
+
+
+ def get_filter(filter_name: str) -> Union[type, str]:
+     if filter_name in FILTER_REGISTRY:
+         return FILTER_REGISTRY[filter_name]
+     else:
+         return filter_name
+
+
+ def build_filter_ensemble(
+     filter_name: str, components: List[List[str]]
+ ) -> FilterEnsemble:
+     """
+     Create a filtering pipeline.
+     """
+     filters = []
+     for function, kwargs in components:
+         if kwargs is None:
+             kwargs = {}
+         # create a filter given its name in the registry
+         f = partial(get_filter(function), **kwargs)
+         # add the filter as a pipeline step
+         filters.append(f)
+
+     return FilterEnsemble(name=filter_name, filters=filters)
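
A hedged sketch (not part of the diff) of how FILTER_REGISTRY is typically consumed: a task config supplies (name, kwargs) pairs, and `build_filter_ensemble` chains the corresponding filters in order.

    ensemble = build_filter_ensemble(
        "get-answer",
        [["regex", {"regex_pattern": r"#### (\-?[0-9\.\,]+)"}], ["remove_whitespace", None]],
    )
    # The resulting FilterEnsemble applies the regex extraction first,
    # then whitespace stripping, to each document's response list.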
lm-evaluation/build/lib/lm_eval/filters/decontamination.py ADDED
@@ -0,0 +1,24 @@
+ from lm_eval.api.filter import Filter
+
+
+ class DecontaminationFilter(Filter):
+     """
+     A filter which evaluates responses for contamination (not yet implemented).
+     """
+
+     name = "track_decontamination"
+
+     def __init__(self, path) -> None:
+         """
+         TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path").
+         should further cache result on a given (task_name, doc_id)
+         """
+         self._decontam_results = None
+
+     def apply(self, resps, docs) -> None:
+         """
+         Return {"no_contamination", "only_contamination"} keys for the 2 different subsets
+         """
+         pass
lm-evaluation/build/lib/lm_eval/filters/extraction.py ADDED
@@ -0,0 +1,183 @@
+ import re
+ import sys
+ import unicodedata
+
+ from lm_eval.api.filter import Filter
+
+
+ class RegexFilter(Filter):
+     """ """
+
+     def __init__(
+         self,
+         regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+         group_select=0,
+         fallback: str = "[invalid]",
+     ) -> None:
+         """
+         pass a string `regex` to run `re.compile(r"regex")` on.
+         `fallback` defines the output returned if no matches for the regex are located.
+         """
+         self.regex_pattern = regex_pattern
+         self.regex = re.compile(regex_pattern)
+         self.group_select = group_select
+         self.fallback = fallback
+
+     def apply(self, resps, docs):
+         # here, we assume we have a list, in which each element is
+         # a list of model responses for some particular input/target pair.
+         # so we process each of these (same input/target response sets)
+         # independently (and keep them a list.)
+         def filter_set(inst):
+             filtered = []
+             for resp in inst:
+                 match = self.regex.findall(resp)
+                 if match:
+                     match = match[self.group_select]
+                     if isinstance(match, tuple):
+                         match = [m for m in match if m][0]
+                     match = match.strip()
+                 else:
+                     match = self.fallback
+                 filtered.append(match)
+             return filtered
+
+         # print(resps)
+         filtered_resps = list(map(lambda x: filter_set(x), resps))
+         # print(filtered_resps)
+
+         return filtered_resps
+
+
+ class WhitespaceFilter(Filter):
+     """ """
+
+     def __init__(self) -> None:
+         pass
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             filtered_resp = []
+             for resp in inst:
+                 if resp.startswith(" "):
+                     resp = resp[1:]
+
+                 filtered_resp.append(resp)
+
+             return filtered_resp
+
+         filtered_resps = [filter_set(resp) for resp in resps]
+
+         return filtered_resps
+
+
+ class MultiChoiceRegexFilter(RegexFilter):
+     """
+     A filter used to extract a model's answer on multiple choice questions with
+     letter answers. assumes each document has a "choices" field
+     containing the list of answer choices and that the answer label symbols
+     are of the form (A), (B), (C), ... or A, B, C.
+     """
+
+     def __init__(
+         self,
+         regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+         group_select=0,
+         fallback: str = "[invalid]",
+         ignore_case=False,
+         ignore_punctuation=False,
+         regexes_to_ignore=None,
+     ) -> None:
+         """
+         regex_pattern: The basic regex pattern to use. If it fails to match, we fall back to a customized match procedure:
+             - step 1: parse the choices between ([A-Z])s, then try to find these choices in the response.
+             - step 2: parse the choice with the regex :[\s]*([A-?]), where ? varies by the number of choices.
+         group_select: Selects the (group_select)th match from the findall result.
+         ignore_case: Ignore case during step 1 matching.
+         ignore_punctuation: Remove punctuation during step 1 matching.
+         regexes_to_ignore: Remove strings matching these regexes during step 1 matching.
+         """
+         super().__init__(regex_pattern, group_select, fallback)
+         self.ignore_case = ignore_case
+         self.ignore_punctuation = ignore_punctuation
+         self.regexes_to_ignore = regexes_to_ignore
+
+     def apply(self, resps, docs):
+         # here, we assume we have a list, in which each element is
+         # a list of model responses for some particular input/target pair.
+         # so we process each of these (same input/target response sets)
+         # independently (and keep them a list.)
+
+         def find_match(regex, resp, convert_dict={}):
+             match = regex.findall(resp)
+             if match:
+                 match = match[self.group_select]
+                 if isinstance(match, tuple):
+                     match = [m for m in match if m][0]
+                 match = match.strip()
+                 if match and match in convert_dict:
+                     match = convert_dict[match]
+             return match
+
+         punct_tbl = dict.fromkeys(
+             i
+             for i in range(sys.maxunicode)
+             if unicodedata.category(chr(i)).startswith("P")
+         )
+
+         def filter_ignores(st):
+             if self.regexes_to_ignore is not None:
+                 for s in self.regexes_to_ignore:
+                     st = re.sub(s, "", st)
+
+             if self.ignore_case:
+                 st = st.lower()
+
+             if self.ignore_punctuation:
+                 # https://stackoverflow.com/a/266162
+                 st = st.translate(punct_tbl)
+             return st
+
+         filtered_resps = []
+
+         for r, doc in zip(resps, docs):
+             fallback_regexes = []
+             choice_to_alpha = {}
+             next_alpha = "A"
+
+             without_paren_fallback_regexes = []
+             without_paren_to_target = {}
+
+             choices = doc["choices"]
+             for c in choices:
+                 m = filter_ignores(c.strip())
+                 fallback_regexes.append(f"{re.escape(m)}")
+                 choice_to_alpha[m] = f"({next_alpha})"
+
+                 without_paren_fallback_regexes.append(next_alpha)
+                 without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+                 next_alpha = chr(ord(next_alpha) + 1)
+             fallback_regex = re.compile("|".join(fallback_regexes))
+             without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+             without_paren_fallback_regex = re.compile(
+                 rf":[\s]*({without_paren_fallback_regex})"
+             )
+
+             filtered = []
+             for resp in r:
+                 match = find_match(self.regex, resp)
+                 if not match:
+                     match = find_match(
+                         fallback_regex, filter_ignores(resp), choice_to_alpha
+                     )
+                     if not match:
+                         match = find_match(
+                             without_paren_fallback_regex, resp, without_paren_to_target
+                         )
+                         if not match:
+                             match = self.fallback
+                 filtered.append(match)
+             filtered_resps.append(filtered)
+
+         return filtered_resps
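
For intuition, a small illustrative run of the RegexFilter above (inputs invented, not part of the diff):

    f = RegexFilter()  # default pattern extracts "#### <number>" answers
    print(f.apply([["The answer is #### 42", "no answer here"]], docs=None))
    # -> [['42', '[invalid]']]  (unmatched responses receive the fallback)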
lm-evaluation/build/lib/lm_eval/filters/selection.py ADDED
@@ -0,0 +1,52 @@
+ from collections import Counter
+
+ from lm_eval.api.filter import Filter
+
+
+ class TakeFirstFilter(Filter):
+     def __init__(self) -> None:
+         """
+         Can define custom behavior here, if an individual instantiation of a Filter class should have state.
+         """
+
+     def apply(self, resps, docs):
+         """
+         Assuming each entry of `resps` is a list of model responses, we discard all but the first response.
+         """
+         return map(lambda r: r[0], resps)
+
+
+ class TakeKFilter(Filter):
+     def __init__(self, **kwargs) -> None:
+         self.k = kwargs.pop("k")
+
+         super().__init__(**kwargs)
+
+     def apply(self, resps, docs):
+         # need resp to be subscriptable to check below
+         resps = list(resps)
+         # check we have at least k responses per doc, else we can't take the first k
+         assert (
+             len(resps[0]) >= self.k
+         ), f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats."
+         return map(lambda r: r[: self.k], resps)
+
+
+ class MajorityVoteFilter(Filter):
+     def __init__(self) -> None:
+         """
+         Can define custom behavior here, if an individual instantiation of a Filter class should have state.
+         """
+
+     def apply(self, resps, docs):
+         """
+         Each entry of `resps` is a list of model responses.
+         We select the response that occurs most frequently in each entry of `resps`.
+         """
+
+         def select_majority(resp):
+             counts = Counter(resp)
+             vote = counts.most_common(1)[0][0]
+             return vote
+
+         return map(lambda r: [select_majority(r)], resps)
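
A quick illustrative call for the majority-vote filter above (inputs invented, not part of the diff); note that `apply` returns a lazy map, so wrap it in list() to realize it:

    mv = MajorityVoteFilter()
    print(list(mv.apply([["7", "7", "9"]], docs=None)))
    # -> [['7']]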
lm-evaluation/build/lib/lm_eval/filters/transformation.py ADDED
@@ -0,0 +1,52 @@
+ from lm_eval.api.filter import Filter
+
+
+ class LowercaseFilter(Filter):
+     def __init__(self) -> None:
+         pass
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [resp.lower() for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
+
+
+ class UppercaseFilter(Filter):
+     def __init__(self) -> None:
+         pass
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [resp.upper() for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
+
+
+ class MapFilter(Filter):
+     def __init__(self, mapping_dict: dict = None, default_value=None) -> None:
+         """
+         Initializes the MapFilter with a given mapping dictionary and default value.
+
+         Args:
+         - mapping_dict (dict): A dictionary containing the key-value mappings.
+           Default is an empty dictionary.
+         - default_value (Any): The value to be returned when a key is not found in the mapping_dict.
+           Default is None.
+
+         Example:
+             mapper = MapFilter({'A': 1, 'B': 2}, default_value=0)
+         """
+         if mapping_dict is None:
+             mapping_dict = {}
+         assert isinstance(
+             mapping_dict, dict
+         ), "Provided mapping_dict is not a dictionary"
+         self.mapping_dict = mapping_dict
+         self.default_value = default_value
+
+     def apply(self, resps, docs):
+         def filter_set(inst):
+             return [self.mapping_dict.get(resp, self.default_value) for resp in inst]
+
+         return [filter_set(resp) for resp in resps]
lm-evaluation/build/lib/lm_eval/models/__init__.py ADDED
@@ -0,0 +1,26 @@
+ from . import (
+     anthropic_llms,
+     dummy,
+     gguf,
+     huggingface,
+     mamba_lm,
+     nemo_lm,
+     neuron_optimum,
+     openai_completions,
+     optimum_lm,
+     textsynth,
+     vllm_causallms,
+ )
+
+
+ # TODO: implement __all__
+
+
+ try:
+     # enable hf hub transfer if available
+     import hf_transfer  # type: ignore # noqa
+     import huggingface_hub.constants  # type: ignore
+
+     huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
+ except ImportError:
+     pass
lm-evaluation/build/lib/lm_eval/models/anthropic_llms.py ADDED
@@ -0,0 +1,360 @@
+ from typing import Any, List, Tuple
+
+ from tqdm import tqdm
+
+ from lm_eval import utils
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.utils import retry_on_specific_exceptions
+
+
+ eval_logger = utils.eval_logger
+
+
+ def anthropic_completion(
+     client,  #: anthropic.Anthropic,
+     model: str,
+     prompt: str,
+     max_tokens_to_sample: int,
+     temperature: float,
+     stop: List[str],
+     **kwargs: Any,
+ ) -> str:
+     """Wrapper function around the Anthropic completion API client with exponential back-off
+     in case of RateLimitError.
+
+     params:
+         client: anthropic.Anthropic
+             Anthropic API client
+         model: str
+             Anthropic model e.g. 'claude-instant-v1', 'claude-2'
+         prompt: str
+             Prompt to feed to the model
+         max_tokens_to_sample: int
+             Maximum number of tokens to sample from the model
+         temperature: float
+             Sampling temperature
+         stop: List[str]
+             List of stop sequences
+         kwargs: Any
+             Additional model_args to pass to the API client
+     """
+
+     try:
+         import anthropic
+     except ModuleNotFoundError:
+         raise Exception(
+             "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+             "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+         )
+
+     def _exception_callback(e: Exception, sleep_time: float) -> None:
+         eval_logger.warning(
+             f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
+         )
+
+     @retry_on_specific_exceptions(
+         on_exceptions=[anthropic.RateLimitError],
+         max_retries=None,  # retry forever, consider changing
+         on_exception_callback=_exception_callback,
+     )
+     def completion():
+         response = client.completions.create(
+             prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}",
+             model=model,
+             # NOTE: Claude really likes to do CoT, and overly aggressive stop sequences
+             # (e.g. gsm8k's ":") may truncate a lot of the input.
+             stop_sequences=[anthropic.HUMAN_PROMPT] + stop,
+             max_tokens_to_sample=max_tokens_to_sample,
+             temperature=temperature,
+             **kwargs,
+         )
+         return response.completion
+
+     return completion()
+
+
+ def anthropic_chat(
+     client,  #: anthropic.Anthropic,
+     model: str,
+     prompt: str,
+     max_tokens: int,
+     temperature: float,
+     stop: List[str],
+     **kwargs: Any,
+ ) -> str:
+     """Wrapper function around the Anthropic completion API client with exponential back-off
+     in case of RateLimitError.
+
+     params:
+         client: anthropic.Anthropic
+             Anthropic API client
+         model: str
+             Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
+         prompt: str
+             Prompt to feed to the model
+         max_tokens: int
+             Maximum number of tokens to sample from the model
+         temperature: float
+             Sampling temperature
+         stop: List[str]
+             List of stop sequences
+         kwargs: Any
+             Additional model_args to pass to the API client
+     """
+
+     try:
+         import anthropic
+     except ModuleNotFoundError:
+         raise Exception(
+             "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+             "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+         )
+
+     def _exception_callback(e: Exception, sleep_time: float) -> None:
+         eval_logger.warning(
+             f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
+         )
+
+     @retry_on_specific_exceptions(
+         on_exceptions=[
+             anthropic.RateLimitError,
+             anthropic.APIConnectionError,
+             anthropic.APIStatusError,
+         ],
+         max_retries=None,  # retry forever, consider changing
+         on_exception_callback=_exception_callback,
+     )
+     def messages():
+         response = client.messages.create(
+             model=model,
+             max_tokens=max_tokens,
+             temperature=temperature,
+             messages=[{"role": "user", "content": f"{prompt}"}],
+             **kwargs,
+         )
+         return response.content[0].text
+
+     return messages()
+
+
+ @register_model("anthropic")
+ class AnthropicLM(LM):
+     REQ_CHUNK_SIZE = 20  # TODO: not used
+
+     def __init__(
+         self,
+         batch_size: int = 1,
+         model: str = "claude-2.0",
+         max_tokens_to_sample: int = 256,
+         temperature: float = 0,  # defaults to 1
+         **kwargs,  # top_p, top_k, etc.
+     ) -> None:
+         """Anthropic API wrapper.
+
+         :param model: str
+             Anthropic model e.g. 'claude-instant-v1', 'claude-2'
+         :param max_tokens_to_sample: int
+             Maximum number of tokens to sample from the model
+         :param temperature: float
+             Sampling temperature
+         :param kwargs: Any
+             Additional model_args to pass to the API client
+         """
+         super().__init__()
+
+         try:
+             import anthropic
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+                 "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+             )
+
+         self.model = model
+         # defaults to os.environ.get("ANTHROPIC_API_KEY")
+         self.client = anthropic.Anthropic()
+         self.temperature = temperature
+         self.max_tokens_to_sample = max_tokens_to_sample
+         self.tokenizer = self.client.get_tokenizer()
+         self.kwargs = kwargs
+
+     @property
+     def eot_token_id(self):
+         # Not sure but anthropic.HUMAN_PROMPT ?
+         raise NotImplementedError("No idea about anthropic tokenization.")
+
+     @property
+     def max_length(self) -> int:
+         return 2048
+
+     @property
+     def max_gen_toks(self) -> int:
+         return self.max_tokens_to_sample
+
+     @property
+     def batch_size(self):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError("No support for logits.")
+
+     @property
+     def device(self):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError("No support for logits.")
+
+     def tok_encode(self, string: str) -> List[int]:
+         return self.tokenizer.encode(string).ids
+
+     def tok_decode(self, tokens: List[int]) -> str:
+         return self.tokenizer.decode(tokens)
+
+     def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError("No support for logits.")
+
+     def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
+         try:
+             import anthropic
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+                 "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+             )
+
+         if not requests:
+             return []
+
+         _requests: List[Tuple[str, dict]] = [req.args for req in requests]
+
+         res = []
+         for request in tqdm(_requests, disable=disable_tqdm):
+             try:
+                 inp = request[0]
+                 request_args = request[1]
+                 # generation_kwargs
+                 until = request_args.get("until")
+                 max_gen_toks = request_args.get("max_gen_toks", self.max_length)
+                 temperature = request_args.get("temperature", self.temperature)
+                 response = anthropic_completion(
+                     client=self.client,
+                     model=self.model,
+                     prompt=inp,
+                     max_tokens_to_sample=max_gen_toks,
+                     temperature=temperature,  # TODO: implement non-greedy sampling for Anthropic
+                     stop=until,  # type: ignore
+                     **self.kwargs,
+                 )
+                 res.append(response)
+
+                 self.cache_hook.add_partial("generate_until", request, response)
+             except anthropic.APIConnectionError as e:  # type: ignore # noqa: F821
+                 eval_logger.critical(f"Server unreachable: {e.__cause__}")
+                 break
+             except anthropic.APIStatusError as e:  # type: ignore # noqa: F821
+                 eval_logger.critical(f"API error {e.status_code}: {e.message}")
+                 break
+
+         return res
+
+     def _model_call(self, inps):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     def _model_generate(self, context, max_length, eos_token_id):
+         # Isn't used because we override generate_until
+         raise NotImplementedError()
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError("No support for logits.")
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError("No support for logits.")
+
+
+ @register_model("anthropic-chat", "anthropic-chat-completions")
+ class AnthropicChatLM(AnthropicLM):
+     REQ_CHUNK_SIZE = 20  # TODO: not used
+
+     def __init__(
+         self,
+         model: str,
+         batch_size: int = 1,
+         max_tokens: int = 256,
+         temperature: float = 0,  # defaults to 1
+         **kwargs,  # top_p, top_k, etc.
+     ) -> None:
+         """Anthropic API wrapper.
+
+         :param model: str
+             Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
+         :param max_tokens: int
+             Maximum number of tokens to sample from the model
+         :param temperature: float
+             Sampling temperature
+         :param kwargs: Any
+             Additional model_args to pass to the API client
+         """
+         super().__init__()
+
+         try:
+             import anthropic
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+                 "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+             )
+
+         self.model = model
+         # defaults to os.environ.get("ANTHROPIC_API_KEY")
+         self.client = anthropic.Anthropic()
+         self.temperature = temperature
+         self.max_tokens = max_tokens  # read back by the max_gen_toks property
+         self.tokenizer = self.client.get_tokenizer()
+         self.kwargs = kwargs
+
+     @property
+     def max_gen_toks(self) -> int:
+         return self.max_tokens
+
+     def generate_until(self, requests) -> List[str]:
+         try:
+             import anthropic
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. "
+                 "please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
+             )
+
+         if not requests:
+             return []
+
+         _requests: List[Tuple[str, dict]] = [req.args for req in requests]
+
+         res = []
+         for request in tqdm(_requests):
+             try:
+                 inp = request[0]
+                 request_args = request[1]
+                 # generation_kwargs
+                 until = request_args.get("until")
+                 max_tokens = request_args.get("max_gen_toks", self.max_length)
+                 temperature = request_args.get("temperature", self.temperature)
+                 response = anthropic_chat(
+                     client=self.client,
+                     model=self.model,
+                     prompt=inp,
+                     max_tokens=max_tokens,
+                     temperature=temperature,  # TODO: implement non-greedy sampling for Anthropic
+                     stop=until,  # type: ignore
+                     **self.kwargs,
+                 )
+                 res.append(response)
+
+                 self.cache_hook.add_partial("generate_until", request, response)
+             except anthropic.APIConnectionError as e:  # type: ignore # noqa: F821
+                 eval_logger.critical(f"Server unreachable: {e.__cause__}")
+                 break
+             except anthropic.APIStatusError as e:  # type: ignore # noqa: F821
+                 eval_logger.critical(f"API error {e.status_code}: {e.message}")
+                 break
+
+         return res
lm-evaluation/build/lib/lm_eval/models/dummy.py ADDED
@@ -0,0 +1,41 @@
+ import random
+
+ from tqdm import tqdm
+
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+
+
+ @register_model("dummy")
+ class DummyLM(LM):
+     def __init__(self) -> None:
+         super().__init__()
+
+     @classmethod
+     def create_from_arg_string(cls, arg_string, additional_config=None):
+         return cls()
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for _ in tqdm(requests, disable=disable_tqdm):
+             res.append((-random.random(), False))
+
+         return res
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for ctx, _ in tqdm(requests, disable=disable_tqdm):
+             res.append("lol")
+             assert ctx.strip() != ""
+
+         return res
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         res = []
+
+         for _ in tqdm(requests, disable=disable_tqdm):
+             res.append(-random.random())
+
+         return res
lm-evaluation/build/lib/lm_eval/models/gguf.py ADDED
@@ -0,0 +1,130 @@
+ import logging
+ import time
+
+ import requests
+ from requests.exceptions import RequestException
+ from tqdm import tqdm
+
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def get_result(logprobs, context_length):
+     is_greedy = True
+     offsets = logprobs["text_offset"]
+     tokens = logprobs["tokens"]
+     tokens_logprobs = logprobs["token_logprobs"]
+
+     idx = 0
+     while offsets[idx] < context_length:
+         idx += 1
+     continuation_logprobs = sum(tokens_logprobs[idx:-1])
+     for i in range(idx, len(tokens)):
+         token = tokens[i]
+         top_tokens = logprobs["top_logprobs"][i]
+         top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
+         if top_token != token:
+             is_greedy = False
+             break
+
+     return continuation_logprobs, is_greedy
+
+
+ @register_model("gguf", "ggml")
+ class GGUFLM(LM):
+     def __init__(self, base_url=None, max_length=2048, **kwargs):
+         super().__init__()
+         self.base_url = base_url
+         assert self.base_url, "must pass `base_url` to use GGUF LM!"
+         self.logprobs = 10
+         self.temperature = 0.0
+         self.max_length = max_length
+
+     def gguf_completion(
+         self, context, continuation=None, stop=None, retries=3, delay=5, **kwargs
+     ):
+         for _ in range(retries):
+             try:
+                 prompt = context
+                 request = {
+                     "prompt": prompt,
+                     "logprobs": self.logprobs,
+                     "temperature": self.temperature,
+                 }
+                 if continuation:
+                     prompt += continuation
+                     request.update({"prompt": prompt, "max_tokens": 1, "echo": True})
+                 if stop is not None:
+                     request["stop"] = stop
+                 response = requests.post(
+                     f"{self.base_url}/v1/completions", json=request
+                 )
+                 response.raise_for_status()
+                 return response.json()
+             except RequestException as e:
+                 logger.error(f"RequestException: {e}")
+                 time.sleep(delay)  # wait before retrying
+         else:
+             raise Exception(f"Failed to get a valid response after {retries} retries.")
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         if not requests:
+             return []
+         res = []
+         for context, continuation in tqdm(
+             [req.args for req in requests], disable=disable_tqdm
+         ):
+             response = self.gguf_completion(context=context, continuation=continuation)
+             if response and "choices" in response and response["choices"]:
+                 choice = response["choices"][0]
+                 logprobs = choice.get("logprobs")
+                 if (
+                     logprobs
+                     and "token_logprobs" in logprobs
+                     and logprobs["token_logprobs"]
+                 ):
+                     logprob, is_greedy = get_result(logprobs, len(context))
+                     res.append((logprob, is_greedy))
+                 else:
+                     logger.warning(
+                         "Invalid logprobs data. Expected 'logprobs' to contain 'token_logprobs' list."
+                     )
+             else:
+                 logger.error(
+                     f"Invalid response for loglikelihood. Response: {response}"
+                 )
+                 assert False
+         return res
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         if not requests:
+             return []
+
+         res = []
+         for request in tqdm([req.args for req in requests], disable=disable_tqdm):
+             inp = request[0]
+             request_args = request[1]
+             until = request_args.get("until", ["</s>"])
+             response = self.gguf_completion(context=inp, stop=until)
+             if response and "choices" in response and response["choices"]:
+                 choice = response["choices"][0]
+                 if "text" in choice:
+                     generated_text = choice["text"].strip()
+                     res.append(generated_text)
+                 else:
+                     logger.error(
+                         f"Invalid response for greedy_until. Response: {response}"
+                     )
+                     res.append(None)  # Add default value in case of error
+             else:
+                 logger.error(f"Invalid response for greedy_until. Response: {response}")
+                 res.append(None)  # Add default value in case of error
+         return res
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError(
+             "loglikelihood_rolling not yet supported for GGUF models"
+         )
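
A sketch (values invented, not part of the diff) of the OpenAI-style `logprobs` payload shape that get_result() above consumes when the server echoes the prompt back:

    logprobs = {
        "text_offset": [0, 5, 11],  # character offset of each echoed token
        "tokens": [" Paris", " is", " nice"],
        "token_logprobs": [-0.2, -0.1, -0.3],
        "top_logprobs": [{" Paris": -0.2}, {" is": -0.1}, {" nice": -0.3}],
    }
    logprob, is_greedy = get_result(logprobs, context_length=5)
    # Tokens at offsets >= context_length form the continuation; is_greedy
    # checks each one against the top-ranked candidate in top_logprobs.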
lm-evaluation/build/lib/lm_eval/models/huggingface.py ADDED
@@ -0,0 +1,1243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import os
3
+ from datetime import timedelta
4
+ from pathlib import Path
5
+ from typing import List, Literal, Optional, Tuple, Union
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ import transformers
10
+ from accelerate import (
11
+ Accelerator,
12
+ DistributedType,
13
+ InitProcessGroupKwargs,
14
+ find_executable_batch_size,
15
+ )
16
+ from packaging import version
17
+ from peft import PeftModel
18
+ from peft import __version__ as PEFT_VERSION
19
+ from tqdm import tqdm
20
+ from transformers.models.auto.modeling_auto import (
21
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
22
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
23
+ )
24
+
25
+ from lm_eval import utils
26
+ from lm_eval.api.instance import Instance
27
+ from lm_eval.api.model import TemplateLM
28
+ from lm_eval.api.registry import register_model
29
+ from lm_eval.models.utils import (
30
+ Collator,
31
+ clear_torch_cache,
32
+ get_dtype,
33
+ pad_and_concat,
34
+ stop_sequences_criteria,
35
+ )
36
+
37
+
38
+ eval_logger = utils.eval_logger
39
+
40
+
41
+ def _get_accelerate_args(
42
+ device_map_option: Optional[str] = "auto",
43
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
44
+ max_cpu_memory: Optional[Union[int, str]] = None,
45
+ offload_folder: Optional[str] = "./offload",
46
+ ) -> dict:
47
+ """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
48
+ max_memory = {}
49
+ if max_memory_per_gpu is not None:
50
+ max_memory_per_gpu_map = {
51
+ device_idx: max_memory_per_gpu
52
+ for device_idx in range(torch.cuda.device_count())
53
+ }
54
+ max_memory.update(max_memory_per_gpu_map)
55
+ if max_cpu_memory is not None:
56
+ max_memory["cpu"] = max_cpu_memory
57
+
58
+ args = {}
59
+ if max_memory:
60
+ args["max_memory"] = max_memory
61
+ args["device_map"] = device_map_option
62
+ args["offload_folder"] = offload_folder
63
+ return args
64
+
65
+
66
+ @register_model("hf-auto", "hf", "huggingface")
67
+ class HFLM(TemplateLM):
68
+ """
69
+ An abstracted Huggingface model class. Enables usage with both models of
70
+ `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes.
71
+
72
+ Supports data-parallel multi-GPU with HF Accelerate.
73
+ """
74
+
75
+ AUTO_MODEL_CLASS = None
76
+ _DEFAULT_MAX_LENGTH = 2048
77
+
78
+ def __init__(
79
+ self,
80
+ pretrained: Optional[Union[str, transformers.PreTrainedModel]] = "gpt2",
81
+ backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
82
+ # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq)
83
+ revision: Optional[str] = "main",
84
+ subfolder: Optional[str] = None,
85
+ tokenizer: Optional[
86
+ Union[
87
+ str,
88
+ transformers.PreTrainedTokenizer,
89
+ transformers.PreTrainedTokenizerFast,
90
+ ]
91
+ ] = None,
92
+ truncation: Optional[bool] = False,
93
+ logits_cache: bool = True,
94
+ max_length: Optional[int] = None,
95
+ device: Optional[str] = "cuda",
96
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
97
+ batch_size: Optional[Union[int, str]] = 1,
98
+ max_batch_size: Optional[int] = 64,
99
+ trust_remote_code: Optional[bool] = False,
100
+ use_fast_tokenizer: Optional[bool] = True,
101
+ add_bos_token: Optional[bool] = False,
102
+ prefix_token_id: Optional[int] = None,
103
+ # arguments used for splitting a model across GPUs naively.
104
+ # only used if `parallelize=True`.
105
+ parallelize: Optional[bool] = False,
106
+ device_map_option: Optional[str] = "auto",
107
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
108
+ max_cpu_memory: Optional[Union[int, str]] = None,
109
+ offload_folder: Optional[Union[str, os.PathLike]] = "./offload",
110
+ # PEFT and quantization options
111
+ peft: Optional[str] = None,
112
+ autogptq: Optional[Union[bool, str]] = False,
113
+ **kwargs,
114
+ ) -> None:
115
+ super().__init__()
116
+
117
+ # optionally: take in an already-initialized transformers.PreTrainedModel
118
+ if not isinstance(pretrained, str):
119
+ eval_logger.warning(
120
+ "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way."
121
+ )
122
+ assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`"
123
+ self._model = pretrained
124
+ self._device = self._model.device
125
+ self._config = self._model.config
126
+ gpus = 0
127
+
128
+ if tokenizer:
129
+ assert isinstance(
130
+ tokenizer, transformers.PreTrainedTokenizer
131
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
132
+ self.tokenizer = tokenizer
133
+ else:
134
+ # Get tokenizer
135
+ model_name = self._model.name_or_path
136
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
137
+ model_name,
138
+ revision=revision,
139
+ trust_remote_code=trust_remote_code,
140
+ use_fast=use_fast_tokenizer,
141
+ )
142
+
143
+ else:
144
+ assert isinstance(device, str)
145
+ assert isinstance(pretrained, str)
146
+ assert isinstance(batch_size, (int, str))
147
+
148
+ gpus = torch.cuda.device_count()
149
+ accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
150
+ accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
151
+ if accelerator.num_processes > 1:
152
+ self.accelerator = accelerator
153
+
154
+ if not (parallelize or accelerator.num_processes > 1):
155
+ # use user-passed device
156
+ device_list = set(
157
+ ["cuda", "cpu"]
158
+ + [f"cuda:{i}" for i in range(torch.cuda.device_count())]
159
+ + ["mps", "mps:0"]
160
+ )
161
+ if device and device in device_list:
162
+ self._device = torch.device(device)
163
+ eval_logger.info(f"Using device '{device}'")
164
+ if device in ("mps", "mps:0") and version.parse(
165
+ torch.__version__
166
+ ) < version.parse("2.1"):
167
+ raise RuntimeError(
168
+ f"mps requires torch >= 2.1. You have {torch.__version__}"
169
+ )
170
+ else:
171
+ eval_logger.info("Device not specified")
172
+ eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
173
+ self._device = (
174
+ torch.device("cuda")
175
+ if torch.cuda.is_available()
176
+ else torch.device("cpu")
177
+ )
178
+ else:
179
+ if device != "cuda":
180
+ eval_logger.info(
181
+ f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
182
+ )
183
+ # TODO: include in warning that `load_in_8bit` etc. affect this too
184
+ self._device = torch.device(device)
185
+
186
+ # TODO: update this to be less of a hack once subfolder is fixed in HF
187
+ revision = revision + ("/" + subfolder if subfolder is not None else "")
188
+
189
+ self._get_config(
190
+ pretrained,
191
+ revision=revision,
192
+ trust_remote_code=trust_remote_code,
193
+ )
194
+
195
+ # determine which of 'causal' and 'seq2seq' backends to use
196
+ self._get_backend(
197
+ config=self.config, backend=backend, trust_remote_code=trust_remote_code
198
+ )
199
+
200
+ # if we passed `pretrained` as a string, initialize our model now
201
+ if isinstance(pretrained, str):
202
+ self._create_model(
203
+ pretrained=pretrained,
204
+ revision=revision,
205
+ dtype=dtype,
206
+ trust_remote_code=trust_remote_code,
207
+ parallelize=parallelize,
208
+ device_map_option=device_map_option,
209
+ max_memory_per_gpu=max_memory_per_gpu,
210
+ max_cpu_memory=max_cpu_memory,
211
+ offload_folder=offload_folder,
212
+ peft=peft,
213
+ autogptq=autogptq,
214
+ **kwargs,
215
+ )
216
+
217
+ # access self._model through self.model property outside this method
218
+ if isinstance(self.model, torch.nn.Module):
219
+ self.model.eval()
220
+ self.model.tie_weights()
221
+
222
+ if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"):
223
+ # TODO: can remove this whole snippet except in the mps case, perhaps?
224
+ if not (parallelize or autogptq or hasattr(self, "accelerator")):
225
+ # place model onto device requested manually,
226
+ # if not using HF Accelerate or device_map
227
+ # or any other option that preloads model onto device
228
+ try:
229
+ self.model.to(self.device)
230
+ except ValueError:
231
+ eval_logger.debug(
232
+ "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore."
233
+ )
234
+
235
+ self._create_tokenizer(
236
+ pretrained,
237
+ tokenizer,
238
+ revision=revision,
239
+ trust_remote_code=trust_remote_code,
240
+ use_fast_tokenizer=use_fast_tokenizer,
241
+ )
242
+
243
+ self.truncation = truncation
244
+ self.logits_cache = logits_cache
245
+ self.vocab_size = self.tokenizer.vocab_size
246
+ # select (or create) a pad token to use
247
+ if self.tokenizer.pad_token:
248
+ pass
249
+ elif self.tokenizer.unk_token:
250
+ self.tokenizer.pad_token_id = self.tokenizer.unk_token_id
251
+ elif self.tokenizer.eos_token:
252
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
253
+ else:
254
+ if getattr(self.config, "model_type", None) == "qwen":
255
+ # Qwen's trust_remote_code tokenizer does not allow for adding special tokens
256
+ self.tokenizer.pad_token = "<|endoftext|>"
257
+ elif (
258
+ self.tokenizer.__class__.__name__ == "RWKVWorldTokenizer"
259
+ or self.tokenizer.__class__.__name__ == "Rwkv5Tokenizer"
260
+ ):
261
+ # The RWKV world tokenizer, does not allow for adding special tokens / setting the pad token (which is set as 0)
262
+ # The additional tokenizer name check is needed, as there exists rwkv4 models with neox tokenizer
263
+ # ---
264
+ # Note that the world tokenizer class name, might change in the future for the final huggingface merge
265
+ # https://github.com/huggingface/transformers/pull/26963
266
+ assert self.tokenizer.pad_token_id == 0
267
+ else:
268
+ self.tokenizer.add_special_tokens({"pad_token": "<|pad|>"})
269
+
270
+ # TODO: override this for Gemma
271
+ self.add_bos_token = add_bos_token
272
+ if getattr(self.config, "model_type", None) == "gemma":
273
+ self.add_bos_token = True
274
+ eval_logger.info(
275
+ f"Model type is '{self.config.model_type}', a BOS token will be used as Gemma underperforms without it."
276
+ )
277
+
278
+ self._max_length = max_length
279
+
280
+ self.batch_schedule = 1
281
+ self.batch_sizes = {}
282
+ self.max_batch_size = max_batch_size
283
+
284
+ if str(batch_size).startswith("auto"):
285
+ batch_size = batch_size.split(":")
286
+ self.batch_size_per_gpu = batch_size[0]
287
+ self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
288
+ else:
289
+ self.batch_size_per_gpu = int(batch_size)
290
+
291
+ if isinstance(pretrained, str):
292
+ # multigpu data-parallel support when launched with accelerate
293
+ if gpus > 1:
294
+ if parallelize:
295
+ if accelerator.num_processes > 1:
296
+ raise RuntimeError(
297
+ "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher."
298
+ )
299
+ else:
300
+ pass
301
+ elif accelerator.num_processes == 1:
302
+ # if we aren't launching via accelerate, ditch
303
+ self._rank = 0
304
+ self._world_size = 1
305
+ else:
306
+ if gpus > accelerator.num_processes:
307
+ eval_logger.warning(
308
+ "WARNING: The number of total system GPUs does not match the number of spawned processes. "
309
+ "If you would like to use data parallelism, please launch the script "
310
+ "with 'accelerate launch *script*'. "
311
+ f"Current run will proceed with {accelerator.num_processes} devices."
312
+ )
313
+ assert (
314
+ accelerator.distributed_type
315
+ in [
316
+ DistributedType.FSDP,
317
+ DistributedType.MULTI_GPU,
318
+ ]
319
+ ), "Unsupported distributed type provided. Only DDP and FSDP are supported."
320
+ if accelerator.distributed_type == DistributedType.FSDP:
321
+ self._model = accelerator.prepare(self.model)
322
+ else:
323
+ self._model = accelerator.prepare_model(
324
+ self.model, evaluation_mode=True
325
+ )
326
+ self._device = torch.device(
327
+ f"cuda:{accelerator.local_process_index}"
328
+ )
329
+ self.accelerator = accelerator
330
+
331
+ if self.accelerator.is_local_main_process:
332
+ eval_logger.info(f"Using {gpus} devices with data parallelism")
333
+
334
+ self._rank = self.accelerator.local_process_index
335
+ self._world_size = self.accelerator.num_processes
336
+ else:
337
+ # if a PreTrainedModel was passed into HFLM, we forgo distributed setup.
338
+ eval_logger.warning(
339
+ "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration"
340
+ )
341
+ self._rank = 0
342
+ self._world_size = 1
343
+
344
+ self.custom_prefix_token_id = prefix_token_id
345
+ if prefix_token_id is not None:
346
+ eval_logger.info(
347
+ f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
348
+ )
349
+
350
+ @property
351
+ def config(self):
352
+ # return the associated transformers.AutoConfig for the given pretrained model.
353
+ return self._config
354
+
355
+ @property
356
+ def model(self):
357
+ # returns the model, unwrapping it if using Accelerate
358
+ if hasattr(self, "accelerator"):
359
+ return self.accelerator.unwrap_model(self._model)
360
+ else:
361
+ return self._model
362
+
363
+ @property
364
+ def eot_token_id(self):
365
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
366
+ return self.tokenizer.eos_token_id
367
+
368
+ @property
369
+ def prefix_token_id(self):
370
+ # it is used as prefix for loglikelihood
371
+ if self.custom_prefix_token_id is not None:
372
+ return self.custom_prefix_token_id
373
+ if self.tokenizer.bos_token_id is not None:
374
+ return self.tokenizer.bos_token_id
375
+ return self.tokenizer.eos_token_id
376
+
377
+ @property
378
+ def max_length(self):
379
+ if self._max_length: # if max length manually set, return it
380
+ return self._max_length
381
+ seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
382
+ for attr in seqlen_config_attrs:
383
+ if hasattr(self.model.config, attr):
384
+ return getattr(self.model.config, attr)
385
+ if hasattr(self.tokenizer, "model_max_length"):
386
+ if self.tokenizer.model_max_length == 1000000000000000019884624838656:
387
+ return self._DEFAULT_MAX_LENGTH
388
+ return self.tokenizer.model_max_length
389
+ return self._DEFAULT_MAX_LENGTH
390
+
+    @property
+    def max_gen_toks(self) -> int:
+        return 256
+
+    @property
+    def batch_size(self):
+        return self.batch_size_per_gpu
+
+    @property
+    def device(self):
+        return self._device
+
+    @property
+    def rank(self):
+        return self._rank
+
+    @property
+    def world_size(self):
+        return self._world_size
+
+    def _get_backend(
+        self,
+        config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
+        backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
+        trust_remote_code: Optional[bool] = False,
+    ) -> None:
+        """
+        Helper method during initialization.
+        Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder))
+        model type to be used.
+        """
+        assert backend in ["default", "causal", "seq2seq"]
+
+        if backend != "default":
+            # if we've settled on a non-default backend, use that manually
+            if backend == "causal":
+                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
+            elif backend == "seq2seq":
+                self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
+            eval_logger.info(
+                f"Overrode HF model backend type, and using type '{backend}'"
+            )
+        else:
+            # determine and use the default HF backend for this model, based on its config + metadata.
+            if (
+                getattr(config, "model_type")
+                in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
+            ):
+                # first check if model type is listed under seq2seq models, since some
+                # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
+                # these special cases should be treated as seq2seq models.
+                self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
+            elif (
+                getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+            ):
+                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
+            else:
+                if not trust_remote_code:
+                    eval_logger.warning(
+                        "HF model type is neither marked as CausalLM nor Seq2SeqLM. \
+                    This is expected if your model requires `trust_remote_code=True` but may be an error otherwise."
+                    )
+                # if model type is neither in HF transformers causal nor seq2seq model registries
+                # then we default to AutoModelForCausalLM
+                self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
+
+        assert self.AUTO_MODEL_CLASS in [
+            transformers.AutoModelForCausalLM,
+            transformers.AutoModelForSeq2SeqLM,
+        ]
+        return None
+
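For reference, a hedged sketch of the same default resolution outside of HFLM, using the transformers auto-class name mappings imported at the top of this file; `t5-small` is just an illustrative checkpoint and fetching its config requires network access:

```
import transformers
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
)

config = transformers.AutoConfig.from_pretrained("t5-small")
if config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:
    auto_cls = transformers.AutoModelForSeq2SeqLM  # t5 resolves to seq2seq
elif config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
    auto_cls = transformers.AutoModelForCausalLM
else:
    auto_cls = transformers.AutoModelForCausalLM  # same fallback as _get_backend
```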
+    def _get_config(
+        self,
+        pretrained: str,
+        revision: str = "main",
+        trust_remote_code: bool = False,
+    ) -> None:
+        self._config = transformers.AutoConfig.from_pretrained(
+            pretrained,
+            revision=revision,
+            trust_remote_code=trust_remote_code,
+        )
+
+    def _create_model(
+        self,
+        pretrained: str,
+        revision: Optional[str] = "main",
+        dtype: Optional[Union[str, torch.dtype]] = "auto",
+        trust_remote_code: Optional[bool] = False,
+        # arguments used for splitting a model across GPUs naively.
+        # only used if `parallelize=True`.
+        # (accelerate naive PP (device_map) options)
+        parallelize: Optional[bool] = False,
+        device_map_option: Optional[str] = "auto",
+        max_memory_per_gpu: Optional[Union[int, str]] = None,
+        max_cpu_memory: Optional[Union[int, str]] = None,
+        offload_folder: Optional[str] = "./offload",
+        # PEFT and quantization options
+        peft: Optional[str] = None,
+        autogptq: Optional[Union[bool, str]] = False,
+        **kwargs,
+    ) -> None:
+        """
+        Initializes an HF or HF-compatible PreTrainedModel from scratch
+        inside HFLM, using the kwargs passed into self.__init__().
+
+        Also handles functionality such as AutoGPTQ usage and PEFT wrapping.
+
+        For future similar extensions to AutoGPTQ that are not core to HF's ecosystem
+        (such as PyTorch models that are nearly, but not quite, fully mirroring
+        HF's public interface relied on in this HFLM class),
+        please consider subclassing HFLM and overriding this and other methods as needed.
+        """
+
+        model_kwargs = kwargs if kwargs else {}
+
+        if parallelize:
+            model_kwargs.update(
+                _get_accelerate_args(
+                    device_map_option,  # TODO: phase out device_map_option?
+                    max_memory_per_gpu,
+                    max_cpu_memory,
+                    offload_folder,
+                )
+            )
+        elif "device_map" not in model_kwargs:
+            # set a device_map to initialize model on the right GPU.
+            # this is needed because the default behavior
+            # for quantized models now seems to be device_map="auto"
+            # which breaks data-parallel mode.
+            if hasattr(self, "accelerator"):
+                model_kwargs.update(
+                    {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}}
+                )
+            else:
+                model_kwargs.update({"device_map": {"": str(self.device)}})
+
+        if not autogptq:
+            if model_kwargs.get("load_in_4bit", None):
+                assert (
+                    transformers.__version__ >= "4.30.0"
+                ), "load_in_4bit requires transformers >= 4.30.0"
+            if transformers.__version__ >= "4.30.0":
+                if model_kwargs.get("load_in_4bit", None):
+                    if model_kwargs.get("bnb_4bit_compute_dtype", None):
+                        model_kwargs["bnb_4bit_compute_dtype"] = get_dtype(
+                            model_kwargs["bnb_4bit_compute_dtype"]
+                        )
+            self._model = self.AUTO_MODEL_CLASS.from_pretrained(
+                pretrained,
+                revision=revision,
+                torch_dtype=get_dtype(dtype),
+                trust_remote_code=trust_remote_code,
+                **model_kwargs,
+            )
+        else:
+            try:
+                from auto_gptq import AutoGPTQForCausalLM
+            except ModuleNotFoundError:
+                raise Exception(
+                    "Tried to load auto_gptq, but auto-gptq is not installed. "
+                    "Please install auto-gptq via `pip install lm-eval[gptq]` or `pip install -e .[gptq]`",
+                )
+
+            self._model = AutoGPTQForCausalLM.from_quantized(
+                pretrained,
+                trust_remote_code=trust_remote_code,
+                model_basename=None if autogptq is True else Path(autogptq).stem,
+                use_safetensors=True
+                if autogptq is True
+                else autogptq.endswith(".safetensors"),
+                **model_kwargs,
+            )
+
+        if peft:
+            if model_kwargs.get("load_in_4bit", None):
+                if version.parse(PEFT_VERSION) < version.parse("0.4.0"):
+                    raise AssertionError("load_in_4bit requires peft >= 0.4.0")
+            self._model = PeftModel.from_pretrained(
+                self._model, peft, revision=revision
+            )
+
+        return None
+
+    def _create_tokenizer(
+        self,
+        pretrained: Union[str, transformers.PreTrainedModel],
+        tokenizer: Optional[
+            Union[
+                str,
+                transformers.PreTrainedTokenizer,
+                transformers.PreTrainedTokenizerFast,
+            ]
+        ],
+        revision: Optional[str] = "main",
+        trust_remote_code: Optional[bool] = False,
+        use_fast_tokenizer: Optional[bool] = True,
+    ) -> None:
+        """
+        Helper method during initialization.
+
+        Create a tokenizer object corresponding to the correct
+        tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed.
+        """
+
+        if tokenizer:
+            if isinstance(tokenizer, str):
+                self.tokenizer = transformers.AutoTokenizer.from_pretrained(
+                    tokenizer,
+                    revision=revision,
+                    trust_remote_code=trust_remote_code,
+                    use_fast=use_fast_tokenizer,
+                )
+            else:
+                assert isinstance(
+                    tokenizer, transformers.PreTrainedTokenizer
+                ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
+                self.tokenizer = tokenizer
+        else:
+            # Get tokenizer based on 'pretrained'
+            if isinstance(pretrained, str):
+                model_name = pretrained
+            else:
+                # get the HF hub name via accessor on model
+                model_name = self.model.name_or_path
+            self.tokenizer = transformers.AutoTokenizer.from_pretrained(
+                model_name,
+                revision=revision,
+                trust_remote_code=trust_remote_code,
+                use_fast=use_fast_tokenizer,
+            )
+        return None
+
+    def _detect_batch_size(self, requests=None, pos: int = 0):
+        if requests:
+            _, context_enc, continuation_enc = requests[pos]
+            max_length = len(
+                (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
+            )
+            max_context_enc = len(context_enc[-(self.max_length + 1) :])
+            max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
+        else:
+            max_length = self.max_length
+            # fallback so the seq2seq branch below has defined lengths when no
+            # requests are passed (otherwise these names would be unbound)
+            max_context_enc = max_length
+            max_cont_enc = max_length
+
+        # if OOM, then halves batch_size and tries again
+        @find_executable_batch_size(starting_batch_size=self.max_batch_size)
+        def forward_batch(batch_size):
+            if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
+                length = max(max_context_enc, max_cont_enc)
+                batched_conts = torch.ones(
+                    (batch_size, length), device=self.device
+                ).long()
+                test_batch = torch.ones((batch_size, length), device=self.device).long()
+                call_kwargs = {
+                    "attn_mask": test_batch,
+                    "labels": batched_conts,
+                }
+            else:
+                call_kwargs = {}
+                test_batch = torch.ones(
+                    (batch_size, max_length), device=self.device
+                ).long()
+            for _ in range(5):
+                out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)  # noqa: F841
+
+            return batch_size
+
+        try:
+            batch_size = forward_batch()
+        except RuntimeError as e:
+            if "No executable batch size found" in str(e):
+                batch_size = 1
+            else:
+                raise
+
+        if self.world_size > 1:
+            # if multi-GPU, always take minimum over all selected batch sizes
+            max_rnk_bs = torch.tensor([batch_size], device=self.device)
+            gathered = (
+                self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
+            )
+            batch_size = min(gathered)
+            clear_torch_cache()
+            return batch_size
+
+        clear_torch_cache()
+        return batch_size
+
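A small sketch of how the `find_executable_batch_size` decorator used above behaves: it retries the wrapped function with a halved batch size whenever an out-of-memory-style error is raised. The fake OOM threshold here is an assumption for illustration only:

```
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def probe(batch_size):
    if batch_size > 16:  # pretend anything larger than 16 runs out of memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(probe())  # tries 64 -> 32 -> 16 and prints 16
```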
+    def tok_encode(
+        self, string: str, left_truncate_len=None, add_special_tokens=None
+    ) -> List[int]:
+        """Encode a string, optionally left-truncating the result."""
+        # for add_special_tokens=None, pass an empty dict and defer to the
+        # tokenizer's own default -- except for CausalLM models, which default
+        # to no special tokens unless self.add_bos_token is set
+        special_tokens_kwargs = {}
+
+        if add_special_tokens is None:
+            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+                special_tokens_kwargs = {
+                    "add_special_tokens": False or self.add_bos_token
+                }
+        # otherwise the caller explicitly defines the value
+        else:
+            special_tokens_kwargs = {"add_special_tokens": add_special_tokens}
+
+        encoding = self.tokenizer.encode(string, **special_tokens_kwargs)
+
+        # left-truncate the encoded context to be at most `left_truncate_len` tokens long
+        if left_truncate_len:
+            encoding = encoding[-left_truncate_len:]
+
+        return encoding
+
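Usage-wise, `tok_encode` reduces to something like the following (gpt2 as an illustrative tokenizer, fetched from the Hub):

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
ids = tok.encode("The quick brown fox", add_special_tokens=False)
ids = ids[-2:]  # left_truncate_len=2 keeps only the last two tokens
print(ids)
```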
+    def tok_batch_encode(
+        self,
+        strings: List[str],
+        padding_side: str = "left",
+        left_truncate_len: int = None,
+        truncation: bool = False,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
+        old_padding_side = self.tokenizer.padding_side
+        self.tokenizer.padding_side = padding_side
+
+        add_special_tokens = {}
+        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+            add_special_tokens = {"add_special_tokens": False or self.add_bos_token}
+
+        encoding = self.tokenizer(
+            strings,
+            truncation=truncation,
+            padding="longest",
+            return_tensors="pt",
+            **add_special_tokens,
+        )
+        if left_truncate_len:
+            encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
+            encoding["attention_mask"] = encoding["attention_mask"][
+                :, -left_truncate_len:
+            ]
+        self.tokenizer.padding_side = old_padding_side
+
+        return encoding["input_ids"], encoding["attention_mask"]
+
+    def tok_decode(self, tokens, skip_special_tokens=True):
+        return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)
+
+    def _model_call(self, inps, attn_mask=None, labels=None):
+        """
+        :param inps: torch.Tensor
+            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
+            [batch, sequence_ctx]. the size of sequence may vary from call to call
+        :param attn_mask: torch.Tensor, optional
+            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
+            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
+        :param labels: torch.Tensor, optional
+            A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
+            (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
+        :return
+            A torch tensor of shape [batch, sequence, vocab] with the
+            logits returned from the model's decoder
+        """
+        with torch.no_grad():
+            if attn_mask is not None or labels is not None:
+                assert attn_mask is not None and labels is not None
+                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
+                return self.model(
+                    input_ids=inps, attention_mask=attn_mask, labels=labels
+                ).logits
+            else:
+                assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
+                return self.model(inps).logits
+
+    def _model_generate(self, context, max_length, stop, **generation_kwargs):
+        # temperature = 0.0 if not set
+        # if do_sample is false and temp==0.0:
+        # remove temperature, as do_sample=False takes care of this
+        # and we don't want a warning from HF
+        generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
+        do_sample = generation_kwargs.get("do_sample", None)
+
+        # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies
+        if generation_kwargs.get("temperature") == 0.0 and do_sample is None:
+            generation_kwargs["do_sample"] = do_sample = False
+
+        if do_sample is False and generation_kwargs.get("temperature") == 0.0:
+            generation_kwargs.pop("temperature")
+        # build stopping criteria
+        stopping_criteria = stop_sequences_criteria(
+            self.tokenizer, stop, context.shape[1], context.shape[0]
+        )
+        return self.model.generate(
+            input_ids=context,
+            max_length=max_length,
+            stopping_criteria=stopping_criteria,
+            pad_token_id=self.tokenizer.pad_token_id,
+            use_cache=True,
+            **generation_kwargs,
+        )
+
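The temperature/do_sample handling above can be isolated into a small pure function; this sketch mirrors the logic so it can be sanity-checked on its own:

```
def normalize_gen_kwargs(kwargs):
    # default temperature to 0.0, map 0.0 to greedy decoding,
    # then drop it so HF does not warn about temperature with do_sample=False
    kwargs["temperature"] = kwargs.get("temperature", 0.0)
    do_sample = kwargs.get("do_sample", None)
    if kwargs["temperature"] == 0.0 and do_sample is None:
        kwargs["do_sample"] = do_sample = False
    if do_sample is False and kwargs.get("temperature") == 0.0:
        kwargs.pop("temperature")
    return kwargs

print(normalize_gen_kwargs({}))                  # {'do_sample': False}
print(normalize_gen_kwargs({"temperature": 0.8}))  # sampling kwargs kept as-is
```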
+    def _select_cont_toks(
+        self, logits: torch.Tensor, contlen: int = None, inplen: int = None
+    ) -> torch.Tensor:
+        if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+            assert (
+                contlen and inplen
+            ), "Must pass input len and cont. len to select scored logits for causal LM"
+            # discard right-padding.
+            # also discard the input/context tokens. we'll only score continuations.
+            logits = logits[inplen - contlen : inplen]
+        elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
+            assert (
+                contlen and not inplen
+            ), "Selecting scored logits for Seq2SeqLM requires only cont. len"
+            # only discard right-padding.
+            # the logits input to this fn only contain decoder-side tokens.
+            logits = logits[:contlen]
+
+        return logits
+
+    def loglikelihood_rolling(
+        self, requests: List[Instance], disable_tqdm: bool = False
+    ) -> List[float]:
+        loglikelihoods = []
+
+        adaptive_batch_size = None
+        if self.batch_size == "auto":
+            # using rolling window with maximum context
+            print("Passed argument batch_size = auto. Detecting largest batch size")
+            batch_size = self._detect_batch_size()
+            print(f"Determined largest batch size: {batch_size}")
+            adaptive_batch_size = batch_size
+
+        for (string,) in tqdm(
+            [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
+        ):
+            rolling_token_windows = list(
+                map(
+                    utils.make_disjoint_window,
+                    utils.get_rolling_token_windows(
+                        token_list=self.tok_encode(string),
+                        prefix_token=self.prefix_token_id,
+                        max_seq_len=self.max_length,
+                        context_len=1,
+                    ),
+                )
+            )
+
+            # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
+            rolling_token_windows = [(None,) + x for x in rolling_token_windows]
+
+            pad_amnt = 0
+            if self.world_size > 1:
+                # We pad out the external document-level iterator so the inner iterator doesn't hang
+                mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
+                gathered = (
+                    self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
+                )
+
+                pad_amnt = max(gathered) - gathered[self.rank]
+                if pad_amnt > 0:
+                    rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
+
+            string_nll = self._loglikelihood_tokens(
+                requests=rolling_token_windows,
+                disable_tqdm=True,
+                override_bs=adaptive_batch_size,
+            )
+
+            if (self.world_size > 1) and (pad_amnt > 0):
+                string_nll = [x[0] for x in string_nll[:-pad_amnt]]
+            else:
+                # discard is_greedy
+                string_nll = [x[0] for x in string_nll]
+
+            string_nll = sum(string_nll)
+            loglikelihoods.append(string_nll)
+
+        return loglikelihoods
+
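The rolling-window construction used above comes from `lm_eval.utils` (the same helpers `nemo_lm.py` below imports directly); a small illustration on a toy token list:

```
from lm_eval.utils import get_rolling_token_windows, make_disjoint_window

tokens = list(range(9))
windows = [
    make_disjoint_window(w)
    for w in get_rolling_token_windows(
        token_list=tokens, prefix_token=-1, max_seq_len=4, context_len=1
    )
]
# disjoint (context, continuation) pairs that score every token exactly once
print(windows)
```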
+    def _batch_scheduler(self, pos, n_reordered_requests):
+        sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
+        if sched in self.batch_sizes:
+            return self.batch_sizes[sched]
+        if (len(self.batch_sizes) > 1) and (
+            self.batch_sizes[sched - 1] == self.max_batch_size
+        ):
+            # if previous batch size is already maximal, skip recomputation
+            self.batch_sizes[sched] = self.max_batch_size
+            return self.batch_sizes[sched]
+        print(
+            f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
+        )
+        self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
+        print(f"Determined largest batch size: {self.batch_sizes[sched]}")
+        return self.batch_sizes[sched]
+
+    def _loglikelihood_tokens(
+        self,
+        requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
+        disable_tqdm: bool = False,
+        override_bs: int = None,
+    ) -> List[Tuple[float, bool]]:
+        # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
+        res = []
+
+        def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
+            """Defines the key for the sorted method"""
+            # the negative sign on len(toks) sorts descending - this has a few advantages:
+            # - time estimates will always be overestimates, not underestimates, which is more useful for planning
+            # - to know the size of a batch when going through the list, you know the first one is always the batch
+            #   padded context length. this is useful to simplify the batching logic and more importantly to make
+            #   automatic adaptive batches much much easier to implement
+            # - any OOMs will happen right away rather than near the end
+
+            toks = req[1] + req[2]
+            return -len(toks), tuple(toks)
+
+        def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
+            """Defines the key to group and lookup one-token continuations"""
+            # Use with group_by="contexts" (optional).
+            # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations.
+            # speeds up some multiple-choice tasks proportionally to the number of choices.
+            # groups requests by context+continuation[:-1] and infers on one request/group.
+            return req[-2] + req[-1][:-1]
+
+        re_ord = Collator(
+            requests,
+            sort_fn=_collate,
+            group_by="contexts"
+            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
+            and self.logits_cache
+            else None,
+            group_fn=_lookup_one_token_cont,
+        )
+
+        # automatic (variable) batch size detection for vectorization
+        # pull longest context sample from request
+        n_reordered_requests = len(re_ord)
+        batch_size = (
+            self.batch_size
+            if self.batch_size != "auto"
+            else override_bs
+            if override_bs is not None
+            else 0
+        )
+        batch_fn = (
+            self._batch_scheduler
+            if self.batch_size == "auto"
+            and n_reordered_requests > 0
+            and not override_bs
+            else None
+        )
+
+        chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
+        pbar = tqdm(
+            total=len(requests),
+            disable=(disable_tqdm or (self.rank != 0)),
+            desc="Running loglikelihood requests",
+        )
+        for chunk in chunks:
+            inps = []
+            cont_toks_list = []
+            inplens = []
+
+            conts = []
+            encoder_attns = []
+
+            padding_len_inp = None
+            padding_len_cont = None
+            # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
+            # tensors, then we pack them together into a batch, call the model, and then pick it all apart
+            # again because vectorizing is annoying
+
+            for _, context_enc, continuation_enc in chunk:
+                # sanity check
+                assert len(context_enc) > 0
+                assert len(continuation_enc) > 0
+                assert len(continuation_enc) <= self.max_length
+
+                # how this all works (illustrated on a causal decoder-only setup):
+                #          CTX      CONT
+                # inp    0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
+                # model  \               \
+                # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
+                # cont_toks      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice
+
+                # when too long to fit in context, truncate from the left
+                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+                    inp = torch.tensor(
+                        (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
+                        dtype=torch.long,
+                        device=self.device,
+                    )
+                    (inplen,) = inp.shape
+                elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
+                    inp = torch.tensor(
+                        (context_enc)[-self.max_length :],
+                        dtype=torch.long,
+                        device=self.device,
+                    )
+                    (inplen,) = inp.shape
+
+                    # build encoder attn masks
+                    encoder_attns.append(torch.ones_like(inp))
+
+                    cont = torch.tensor(
+                        (continuation_enc)[-self.max_length :],
+                        # TODO: left-shift these?
+                        # TODO: our code assumes we never end up truncating conts for either model type
+                        dtype=torch.long,
+                        device=self.device,
+                    )
+                    (contlen,) = cont.shape
+
+                    conts.append(cont)
+
+                    padding_len_cont = (
+                        max(padding_len_cont, contlen)
+                        if padding_len_cont is not None
+                        else contlen
+                    )
+
+                padding_len_inp = (
+                    max(padding_len_inp, inplen)
+                    if padding_len_inp is not None
+                    else inplen
+                )
+
+                inps.append(inp)  # [1, inp_length]
+                cont_toks_list.append(continuation_enc)
+                inplens.append(inplen)
+
+            # create encoder attn mask and batched conts, if seq2seq
+            call_kwargs = {}
+            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+                batched_inps = pad_and_concat(
+                    padding_len_inp, inps, padding_side="right"
+                )  # [batch, padding_len_inp]
+            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
+                # TODO: left-pad encoder inps and mask?
+                batched_inps = pad_and_concat(
+                    padding_len_inp, inps
+                )  # [batch, padding_len_inp]
+                batched_conts = pad_and_concat(
+                    padding_len_cont, conts
+                )  # [batch, padding_len_cont]
+                batched_encoder_mask = pad_and_concat(
+                    padding_len_inp, encoder_attns
+                )  # [batch, padding_len_inp]
+                call_kwargs = {
+                    "attn_mask": batched_encoder_mask,
+                    "labels": batched_conts,
+                }
+
+            multi_logits = F.log_softmax(
+                self._model_call(batched_inps, **call_kwargs), dim=-1
+            )  # [batch, padding_length (inp or cont), vocab]
+
+            for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
+                chunk, multi_logits, inplens, cont_toks_list
+            ):
+                # Slice to original seq length
+                contlen = len(cont_toks)
+                # take only logits in the continuation
+                # (discard context toks if decoder-only ; discard right-padding)
+                # also discards + checks for "virtual tokens" in the causal LM's input window
+                # from prompt/prefix tuning tokens, if applicable
+                ctx_len = (
+                    inplen + (logits.shape[0] - padding_len_inp)
+                    if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
+                    else None
+                )
+                logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
+                logits = logits.unsqueeze(0)  # [1, seq, vocab]
+
+                # Check if per-token argmax is exactly equal to continuation
+                greedy_tokens = logits.argmax(dim=-1)
+
+                # check for one-token continuation cache hits.
+                # noop in case group_by != "contexts" or no cache hit and returns the
+                # original args. Otherwise, expands the logits batch dimension and yields each
+                # batch along with matching continuation tokens and prompt strings.
+                # logits -> [1, seq, vocab]
+                for request_str, cont_toks, logits in re_ord.get_cache(
+                    req_str=request_str,
+                    cxt_toks=ctx_tokens,
+                    cont_toks=cont_toks,
+                    logits=logits,
+                ):
+                    cont_toks = torch.tensor(
+                        cont_toks, dtype=torch.long, device=self.device
+                    ).unsqueeze(0)  # [1, seq]
+                    max_equal = (greedy_tokens == cont_toks).all()
+
+                    # Obtain log-probs at the corresponding continuation token indices
+                    # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
+                    logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
+                        -1
+                    )  # [1, seq]
+
+                    # Answer: (log prob, is-exact-match)
+                    answer = (float(logits.sum()), bool(max_equal))
+
+                    res.append(answer)
+
+                    self.cache_hook.add_partial("loglikelihood", request_str, answer)
+                    pbar.update(1)
+
+        pbar.close()
+
+        return re_ord.get_original(res)
+
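The descending-length sort that `_collate` implements is easy to see on plain strings; a toy version:

```
reqs = ["a b c d", "a", "a b c"]
# longest first: OOMs surface immediately, and the first element of each
# batch fixes the padded length for the whole batch
reqs.sort(key=lambda s: -len(s.split()))
print(reqs)  # ['a b c d', 'a b c', 'a']
```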
+    def generate_until(
+        self, requests: List[Instance], disable_tqdm: bool = False
+    ) -> List[str]:
+        res = []
+
+        def _collate(req: Tuple[str, dict]):
+            """Defines the key for the sorted method"""
+            # the negative sign on len(toks) sorts descending - this has a few advantages:
+            # - time estimates will always be overestimates, not underestimates, which is more useful for planning
+            # - to know the size of a batch when going through the list, you know the first one is always the batch
+            #   padded context length. this is useful to simplify the batching logic and more importantly to make
+            #   automatic adaptive batches much much easier to implement
+            # - any OOMs will happen right away rather than near the end
+            toks = self.tok_encode(req[0])
+            return -len(toks), req[0]
+
+        pbar = tqdm(
+            total=len(requests),
+            disable=(disable_tqdm or (self.rank != 0)),
+            desc="Running generate_until requests",
+        )
+        adaptive_batch_size = None
+        if self.batch_size == "auto":
+            # using rolling window with maximum context
+            print("Passed argument batch_size = auto. Detecting largest batch size")
+            batch_size = self._detect_batch_size()
+            print(f"Determined largest batch size: {batch_size}")
+            adaptive_batch_size = batch_size
+        # for each different set of kwargs, we execute all requests, by batch.
+        batch_size = (
+            self.batch_size
+            if self.batch_size != "auto"
+            else adaptive_batch_size
+            if adaptive_batch_size is not None
+            else 0
+        )
+        batch_fn = (
+            self._batch_scheduler
+            if self.batch_size == "auto" and not adaptive_batch_size
+            else None
+        )
+
+        # we group requests by their generation_kwargs,
+        # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
+        # in the same batch.
+        # group_fn=lambda x: x[1] -> x=(context, gen_kwargs)
+        re_ords = Collator(
+            [reg.args for reg in requests],
+            sort_fn=_collate,
+            group_by="gen_kwargs",
+            group_fn=lambda x: x[1],
+        )
+        chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
+        for chunk in chunks:
+            contexts, all_gen_kwargs = zip(*chunk)
+            # we assume all gen kwargs in the batch are the same
+            # this is safe to assume because the `grouper` object ensures it.
+            gen_kwargs = all_gen_kwargs[0]
+            # unpack our keyword arguments.
+            until = None
+            if isinstance(gen_kwargs, dict):
+                kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
+                if "until" in kwargs.keys():
+                    until = kwargs.pop("until")
+                    if isinstance(until, str):
+                        until = [until]
+                    elif not isinstance(until, list):
+                        raise ValueError(
+                            f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
+                        )
+            else:
+                raise ValueError(
+                    f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
+                )
+            # add EOS token to stop sequences
+            eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False)
+            if not until:
+                until = [eos]
+            else:
+                until.append(eos)
+            if "max_gen_toks" in kwargs.keys():
+                max_gen_toks = kwargs.pop("max_gen_toks")
+            else:
+                max_gen_toks = self.max_gen_toks
+
+            # set the max length in tokens of inputs ("context_enc")
+            if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+                # max len for inputs = max length, minus room to generate the max new tokens
+                max_ctx_len = self.max_length - max_gen_toks
+            elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
+                # max len for inputs = encoder's whole max_length
+                max_ctx_len = self.max_length
+
+            # encode, pad, and truncate contexts for this batch
+            context_enc, attn_masks = self.tok_batch_encode(
+                contexts,
+                left_truncate_len=max_ctx_len,
+                truncation=self.truncation,
+            )
+            context_enc = context_enc.to(self.device)
+            attn_masks = attn_masks.to(self.device)
+
+            if "max_length" not in kwargs:
+                kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
+
+            # perform batched generation
+            cont = self._model_generate(
+                context=context_enc,
+                attention_mask=attn_masks,
+                stop=until,
+                **kwargs,
+            )
+
+            cont_toks_list = cont.tolist()
+            for cont_toks, context in zip(cont_toks_list, contexts):
+                # discard context + left-padding toks if using causal decoder-only LM
+                if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
+                    cont_toks = cont_toks[context_enc.shape[1] :]
+
+                s = self.tok_decode(cont_toks)
+
+                # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
+                for term in until:
+                    if len(term) > 0:
+                        # ignore '' separator,
+                        # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
+                        s = s.split(term)[0]
+
+                res.append(s)
+
+                self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s)
+                pbar.update(1)
+        # reorder this group of results back to original unsorted form
+        res = re_ords.get_original(res)
+
+        pbar.close()
+
+        return res
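End-to-end, this class is typically driven through the evaluator rather than instantiated by hand. A hedged usage sketch (the model and task names are illustrative; `simple_evaluate` is the harness's public entry point in this version):

```
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",  # resolves to the HFLM class defined in this file
    model_args="pretrained=gpt2,dtype=float32",
    tasks=["lambada_openai"],
    batch_size="auto",
)
print(results["results"])
```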
lm-evaluation/build/lib/lm_eval/models/mamba_lm.py ADDED
@@ -0,0 +1,126 @@
+ from typing import Optional, Union
+
+ import torch
+
+ import lm_eval.models.utils
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.huggingface import HFLM
+
+
+ @register_model("mamba_ssm")
+ class MambaLMWrapper(HFLM):
+     def __init__(
+         self,
+         pretrained="state-spaces/mamba-130m",
+         **kwargs,
+     ) -> None:
+         """
+         Mamba (via the `mamba_ssm` package) supports the following args:
+         ```
+         d_model: int,
+         n_layer: int,
+         vocab_size: int,
+         initializer_cfg=None,
+         pad_vocab_size_multiple: int = 1,
+         ssm_cfg=None,
+         norm_epsilon: float = 1e-5,
+         rms_norm: bool = False,
+         fused_add_norm=False,
+         residual_in_fp32=False,
+         ```
+
+         See https://github.com/state-spaces/mamba/blob/main/mamba_ssm/models/mixer_seq_simple.py#L175 for more info.
+         The above can all be passed via `--model_args` or to this __init__() directly,
+         but we recommend placing many of these within the config.json file uploaded alongside your
+         Mamba model to the HF Hub instead.
+         All other HuggingFace from_pretrained() kwargs,
+         such as those related to
+         `parallelize=True`, PEFT, autoGPTQ,
+         or any sub-configurations of these advanced args,
+         are unsupported by the `mamba_ssm` package.
+
+         The HFLM arguments
+
+         `backend`, `tokenizer`, `truncation`, `max_length`,
+         `device`, `dtype`, `batch_size`, `max_batch_size`, `trust_remote_code`, `use_fast_tokenizer`
+
+         are all supported by Mamba where they do not conflict
+         with Mamba-specific restrictions such as causal LMs only.
+         """
+
+         if "backend" in kwargs:
+             # mamba currently only supports causal models
+             assert kwargs["backend"] == "causal"
+
+         super().__init__(
+             pretrained=pretrained,
+             # set appropriate defaults for tokenizer, max length, etc
+             backend=kwargs.pop("backend", "causal"),
+             tokenizer=kwargs.pop("tokenizer", "EleutherAI/gpt-neox-20b"),
+             max_length=kwargs.pop("max_length", 2048),
+             **kwargs,
+         )
+
+     def _get_config(
+         self,
+         pretrained: str,
+         **kwargs,
+     ) -> None:
+         try:
+             from mamba_ssm.utils.hf import load_config_hf  # noqa: F811
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
+             )
+
+         self._config = load_config_hf(pretrained)
+
+     def _create_model(
+         self,
+         pretrained: str,
+         dtype: Optional[Union[str, torch.dtype]] = "float16",
+         # no `parallelize=True` options
+         # no PEFT and quantization options
+         # Mamba does not support arbitrary HF from_pretrained() args
+         **kwargs,
+     ) -> None:
+         try:
+             from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel  # noqa: F811
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
+             )
+
+         self._model = MambaLMHeadModel.from_pretrained(
+             pretrained,
+             device=self._device,
+             dtype=torch.float16
+             if dtype == "auto"
+             else lm_eval.models.utils.get_dtype(dtype),
+         )
+
+     def _model_generate(self, context, max_length, stop, **generation_kwargs):
+         for key in ("do_sample", "attention_mask"):
+             if key in generation_kwargs:
+                 generation_kwargs.pop(key)
+
+         # mamba's custom GenerationMixin currently does not support
+         # passing stopping criteria.
+         # for the time being, we simply generate to max length,
+         # then truncate (equivalent result)
+         # -- this should be revisited to speed up generation
+         # stopping_criteria = stop_sequences_criteria(
+         #     self.tokenizer, stop, 1, context.shape[0]
+         # )
+
+         return self.model.generate(
+             input_ids=context,
+             max_length=max_length,
+             # stopping_criteria=stopping_criteria,
+             # pad_token_id=self.tokenizer.pad_token_id,
+             # use_cache=True,
+             **generation_kwargs,
+         )
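A hedged usage sketch for the wrapper above; it assumes `lm-eval` was installed with the `mamba` extra and that a CUDA device is available:

```
import lm_eval

results = lm_eval.simple_evaluate(
    model="mamba_ssm",  # the name registered by @register_model above
    model_args="pretrained=state-spaces/mamba-130m",
    tasks=["lambada_openai"],
)
```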
lm-evaluation/build/lib/lm_eval/models/nemo_lm.py ADDED
@@ -0,0 +1,537 @@
+ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import importlib
+ import pathlib
+ from copy import deepcopy
+ from typing import List, Literal
+
+ import filelock
+ import numpy as np
+ import torch
+ from tqdm import tqdm
+
+ from lm_eval.api.instance import Instance
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.utils import Collator
+ from lm_eval.utils import (
+     eval_logger,
+     get_rolling_token_windows,
+     make_disjoint_window,
+     simple_parse_args_string,
+ )
+
+
+ def _patch_pretrained_cfg(
+     pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size
+ ):
+     try:
+         import omegaconf
+     except ModuleNotFoundError:
+         raise Exception(
+             "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
+             "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
+             "or installing nemo following https://github.com/NVIDIA/NeMo.",
+         )
+
+     omegaconf.OmegaConf.set_struct(pretrained_cfg, True)
+     with omegaconf.open_dict(pretrained_cfg):
+         attributes_to_update = {
+             "sequence_parallel": False,
+             "activations_checkpoint_granularity": None,
+             "activations_checkpoint_method": None,
+             "precision": trainer.precision,
+             "global_batch_size": None,
+             "tensor_model_parallel_size": tensor_model_parallel_size,
+             "pipeline_model_parallel_size": pipeline_model_parallel_size,
+             "apply_rope_fusion": False,
+         }
+         for name, value in attributes_to_update.items():
+             if hasattr(pretrained_cfg, name):
+                 pretrained_cfg[name] = value
+     return pretrained_cfg
+
+
+ def _get_target_from_class(target_class) -> str:
+     return f"{target_class.__module__}.{target_class.__name__}"
+
+
+ def load_model(
+     model_path: str,
+     trainer,
+     tensor_model_parallel_size: int,
+     pipeline_model_parallel_size: int,
+ ) -> torch.nn.Module:
+     try:
+         from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import (
+             MegatronGPTModel,
+         )
+         from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
+     except ModuleNotFoundError:
+         raise Exception(
+             "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
+             "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
+             "or installing nemo following https://github.com/NVIDIA/NeMo.",
+         )
+     model_path = pathlib.Path(model_path)
+
+     save_restore_connector = NLPSaveRestoreConnector()
+     if model_path.is_dir():
+         save_restore_connector.model_extracted_dir = model_path.as_posix()
+     pretrained_cfg = save_restore_connector.restore_from(
+         None, model_path.as_posix(), return_config=True, trainer=trainer
+     )
+     if not hasattr(pretrained_cfg, "target"):
+         pretrained_cfg["target"] = _get_target_from_class(MegatronGPTModel)
+
+     pretrained_cfg = _patch_pretrained_cfg(
+         pretrained_cfg,
+         trainer,
+         tensor_model_parallel_size=tensor_model_parallel_size,
+         pipeline_model_parallel_size=pipeline_model_parallel_size,
+     )
+
+     model_to_load_path = model_path
+     override_config = pretrained_cfg
+
+     module_name, class_name = override_config.target.rsplit(".", 1)
+     model_class = getattr(importlib.import_module(module_name), class_name)
+
+     # monkeypatch _build_tokenizer method to be process-safe
+     tokenizer_lock = filelock.FileLock(f"/tmp/{model_path.name}.tokenizer.lock")
+
+     def _synced_build_tokenizer(self):
+         with tokenizer_lock:
+             self._original_build_tokenizer()
+
+     model_class._original_build_tokenizer = model_class._build_tokenizer
+     model_class._build_tokenizer = _synced_build_tokenizer
+
+     model = model_class.restore_from(
+         restore_path=model_to_load_path.as_posix(),
+         trainer=trainer,
+         override_config_path=override_config,
+         save_restore_connector=save_restore_connector,
+         map_location=f"cuda:{trainer.local_rank}",
+     )
+
+     model.freeze()
+     model.training = False
+     try:
+         # Have to turn off activations_checkpoint_method for inference
+         model.model.language_model.encoder.activations_checkpoint_method = None
+     except AttributeError:
+         pass
+     return model
+
+
+ def setup_distributed_environment(trainer):
+     try:
+         from nemo.utils.app_state import AppState
+     except ModuleNotFoundError:
+         raise Exception(
+             "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
+             "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
+             "or installing nemo following https://github.com/NVIDIA/NeMo.",
+         )
+
+     def dummy():
+         return
+
+     if trainer.strategy.launcher is not None:
+         trainer.strategy.launcher.launch(dummy, trainer=trainer)
+     trainer.strategy.setup_environment()
+
+     app_state = AppState()
+
+     return app_state
+
+
+ @register_model("nemo_lm")
+ class NeMoLM(LM):
+     def __init__(
+         self,
+         path: str,
+         max_length: int = 4096,
+         batch_size: int = 1,
+         max_gen_toks: int = 256,
+         devices: int = 1,
+         num_nodes: int = 1,
+         tensor_model_parallel_size: int = 1,
+         pipeline_model_parallel_size: int = 1,
+         precision: Literal[
+             "16-mixed",
+             "bf16-mixed",
+             "32-true",
+             "64-true",
+             64,
+             32,
+             16,
+             "64",
+             "32",
+             "16",
+             "bf16",
+         ] = "bf16",
+         **kwargs,
+     ):
+         try:
+             from nemo.collections.nlp.modules.common.text_generation_utils import (
+                 generate,
+             )
+             from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
+             from pytorch_lightning.trainer.trainer import Trainer
+
+             self.generate = generate
+         except ModuleNotFoundError:
+             raise Exception(
+                 "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed. "
+                 "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
+                 "or installing nemo following https://github.com/NVIDIA/NeMo.",
+             )
+
+         super().__init__()
+
+         if (
+             tensor_model_parallel_size == 1
+             and pipeline_model_parallel_size == 1
+             and devices > 1
+         ):
+             eval_logger.info(
+                 f"The number of data replicas for evaluation is {devices}."
+             )
+             eval_logger.info(f"The total number of devices is {devices}.")
+             eval_logger.info(
+                 "No tensor parallelism or pipeline parallelism is applied."
+             )
+
+         elif tensor_model_parallel_size * pipeline_model_parallel_size == devices:
+             eval_logger.info(
+                 f"Setting tensor parallelism to {tensor_model_parallel_size} and pipeline parallelism to {pipeline_model_parallel_size}."
+             )
+             eval_logger.info(f"The total number of devices is {devices}.")
+             eval_logger.info("No data parallelism is applied.")
+
+         else:
+             raise ValueError(
+                 "Please set the product of tensor_model_parallel_size and pipeline_model_parallel_size "
+                 "equal to the specified number of devices."
+             )
+
+         if num_nodes > 1:
+             raise ValueError(
+                 "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1."
+             )
+
+         trainer = Trainer(
+             strategy=NLPDDPStrategy(),
+             devices=devices,
+             accelerator="gpu",
+             num_nodes=num_nodes,
+             precision=precision,
+             logger=False,
+             enable_checkpointing=False,
+             use_distributed_sampler=False,
+         )
+         # Modify the following flags only for data replication
+         if (
+             tensor_model_parallel_size == 1
+             and pipeline_model_parallel_size == 1
+             and devices > 1
+         ):
+             self._device = torch.device(f"cuda:{trainer.global_rank}")
+             self._rank = trainer.global_rank
+             self._world_size = trainer.world_size
+         self.model = load_model(
+             path,
+             trainer,
+             tensor_model_parallel_size=tensor_model_parallel_size,
+             pipeline_model_parallel_size=pipeline_model_parallel_size,
+         ).cuda()
+         self.tokenizer = self.model.tokenizer
+         self.app_state = setup_distributed_environment(trainer)
+
+         self._max_length = max_length
+         self._batch_size = int(batch_size)
+         self._max_gen_toks = max_gen_toks
+
+     @classmethod
+     def create_from_arg_string(cls, arg_string, additional_config=None):
+         args = simple_parse_args_string(arg_string)
+         if additional_config:
+             args["batch_size"] = additional_config.get("batch_size", 1)
+
+         return cls(**args)
+
+     @property
+     def eot_token_id(self):
+         try:
+             return self.tokenizer.eos_id
+         except AttributeError:
+             return None
+
+     @property
+     def max_length(self):
+         return self._max_length
+
+     @property
+     def max_gen_toks(self):
+         return self._max_gen_toks
+
+     @property
+     def batch_size(self):
+         return self._batch_size
+
+     @property
+     def device(self):
+         return self._device
+
+     @property
+     def rank(self):
+         return self._rank
+
+     @property
+     def world_size(self):
+         return self._world_size
+
+     @property
+     def accelerator(self):
+         return self._Accelerator(self.world_size)
+
+     class _Accelerator:
+         def __init__(self, world_size):
+             self.world_size = world_size
+
+         def wait_for_everyone(self):
+             torch.distributed.barrier()
+
+         def gather(self, local_tensor):
+             gathered_tensors = [
+                 torch.zeros(1, dtype=local_tensor.dtype).cuda()
+                 for _ in range(self.world_size)
+             ]
+             torch.distributed.all_gather(gathered_tensors, local_tensor)
+             return torch.cat(gathered_tensors)
+
+     def tok_encode(self, string: str):
+         return self.tokenizer.text_to_ids(string)
+
+     def tok_decode(self, tokens):
+         return self.tokenizer.ids_to_text(tokens)
+
+     def _encode_pair(self, context, continuation):
+         n_spaces = len(context) - len(context.rstrip())
+         if n_spaces > 0:
+             continuation = context[-n_spaces:] + continuation
+             context = context[:-n_spaces]
+         whole_enc = self.tok_encode(context + continuation)
+         context_enc = self.tok_encode(context)
+         context_enc_len = len(context_enc)
+         continuation_enc = whole_enc[context_enc_len:]
+         return context_enc, continuation_enc
+
+     def loglikelihood(self, requests):
+         new_reqs = []
+         for context, continuation in [req.args for req in requests]:
+             if context == "":
+                 # end of text as context
+                 context_enc, continuation_enc = (
+                     [self.eot_token_id],
+                     self.tok_encode(continuation),
+                 )
+             else:
+                 context_enc, continuation_enc = self._encode_pair(context, continuation)
+
+             new_reqs.append(((context, continuation), context_enc, continuation_enc))
+
+         return self._loglikelihood_tokens(new_reqs)
+
+     def loglikelihood_rolling(
+         self, requests: List[Instance], disable_tqdm: bool = False
+     ) -> List[float]:
+         loglikelihoods = []
+
+         for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
+             rolling_token_windows = list(
+                 map(
+                     make_disjoint_window,
+                     get_rolling_token_windows(
+                         token_list=self.tok_encode(string),
+                         prefix_token=self.eot_token_id,
+                         max_seq_len=self.max_length - 1,
+                         context_len=1,
+                     ),
+                 )
+             )
+
+             rolling_token_windows = [(None,) + x for x in rolling_token_windows]
+
+             string_nll = self._loglikelihood_tokens(
+                 rolling_token_windows,
+             )
+
+             # discard is_greedy
+             string_nll = [x[0] for x in string_nll]
+
+             string_nll = sum(string_nll)
+             loglikelihoods.append(string_nll)
+         return loglikelihoods
+
+     def _loglikelihood_tokens(self, requests, disable_tqdm=False):
+         res = []
+
+         def _collate(x):
+             toks = x[1] + x[2]
+             return -len(toks), tuple(toks)
+
+         re_ord = Collator(requests, sort_fn=_collate)
+         chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None)
+         pbar = tqdm(
+             total=len(requests),
+             disable=(disable_tqdm or (self.rank != 0)),
+             desc="Running loglikelihood requests",
+         )
+         for chunk in chunks:
+             inps = []
+             ctxlens = []
+             contlens = []
+
+             for _, context_enc, continuation_enc in chunk:
+                 # Leave one token for generation. Tokens_to_generate = 0 breaks NeMo.
+                 inp = (context_enc + continuation_enc)[-(self.max_length - 1) :]
+
+                 ctxlen = len(context_enc) - max(
+                     0, len(context_enc) + len(continuation_enc) - (self.max_length - 1)
+                 )
+                 ctxlens.append(ctxlen)
+                 contlens.append(len(continuation_enc))
+
+                 inps.append(self.tok_decode(inp))
+
+             output = self.generate(
+                 self.model,
+                 inputs=inps,
+                 tokens_to_generate=1,
+                 min_tokens_to_generate=1,
+                 compute_logprob=True,
+                 all_probs=True,
+             )
+
+             batch_token_ids = np.asarray(output["token_ids"])[:, :-1]
+             batch_logprobs = output["logprob"][:, :-1]
+             batch_full_logprob = output["full_logprob"][:, :-1, :]
+
+             # Compute greedy tokens for the entire batch rather than calling it with the proper ctxlen for each sample.
+             # Additional tokens for each sample will be trimmed later.
+             min_ctxlen = min(ctxlens)
+
+             # Use min_ctxlen-1 instead of min_ctxlen since full_logprobs are not returned for the first token.
+             batch_greedy_tokens = (
+                 torch.argmax(batch_full_logprob[:, min_ctxlen - 1 :, :], -1)
+                 .cpu()
+                 .numpy()
+             )
+
+             for token_ids, greedy_tokens, logprobs, ctxlen, contlen, (
+                 cache_key,
+                 _,
+                 _,
+             ) in zip(
+                 batch_token_ids,
+                 batch_greedy_tokens,
+                 batch_logprobs,
+                 ctxlens,
+                 contlens,
+                 chunk,
+             ):
+                 # Trim at contlen since shorter contexts in a batch will have more than one token generated.
+                 # Use ctxlen-1 instead of ctxlen, same as for full_logprob in the batch_greedy_tokens calculation.
+                 logprobs = (logprobs[ctxlen - 1 :])[:contlen]
+                 logprob = sum(logprobs).tolist()
+
+                 continuation_tokens = (token_ids[ctxlen:])[:contlen]
+                 len_diff = ctxlen - min_ctxlen
+                 is_greedy = continuation_tokens == (greedy_tokens[len_diff:])[:contlen]
+                 if not isinstance(is_greedy, bool):
+                     is_greedy = is_greedy.all()
+                 answer = (logprob, is_greedy)
+
+                 if cache_key is not None:
+                     self.cache_hook.add_partial("loglikelihood", cache_key, answer)
+
+                 res.append(answer)
+                 pbar.update(1)
+
+         pbar.close()
+
+         return re_ord.get_original(res)
+
+     def generate_until(self, requests):
+         if not requests:
+             return []
+         res = []
+
+         def get_until(req_args):
+             until = req_args.get("until", [])
+             until = deepcopy(until)  # prevent from modifying req_args for cache_key
+             if self.eot_token_id not in until:
+                 until.append(self.eot_token_id)
+             return until
+
+         def _collate(x):
+             toks = self.tok_encode(x[0])
+             return len(toks), x[0]
+
+         re_ords = Collator(
+             [reg.args for reg in requests], sort_fn=_collate, group_by="gen_kwargs"
+         )
+         chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
+         for chunk in chunks:
+             contexts, all_gen_kwargs = zip(*chunk)
+             # we assume all gen kwargs in the batch are the same
+             # this is safe to assume because the `grouper` object ensures it.
+             req_args = all_gen_kwargs[0]
+             # unpack our keyword arguments.
+             until = get_until(req_args)
+             max_gen_toks = req_args.get("max_gen_toks", self.max_gen_toks)
+
+             remaining_length = self.max_length - max_gen_toks
+             contexts = []
+             for context, _ in chunk:
+                 encoded_context = self.tok_encode(context)
+                 encoded_context = encoded_context[-remaining_length:]
+                 contexts.append(self.tok_decode(encoded_context))
+
+             output = self.generate(
+                 self.model,
+                 inputs=contexts,
+                 tokens_to_generate=max_gen_toks,
+                 end_strings=until,
+                 greedy=True,
+             )
+
+             answers = output["sentences"]
+
+             continuations = []
+             for context, answer in zip(contexts, answers):
+                 continuations.append(answer[len(context) :])
+
+             for term in until:
+                 continuations = [answer.split(term)[0] for answer in continuations]
+
+             for request, answer in zip(chunk, continuations):
+                 self.cache_hook.add_partial("greedy_until", request, answer)
+                 res.append(answer)
+
+         return re_ords.get_original(res)
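The whitespace handling in `_encode_pair` above is worth a standalone illustration: trailing context whitespace is moved onto the continuation so the pair tokenizes the same way as the concatenated string. A minimal sketch of just that step:

```
def split_pair(context, continuation):
    # move trailing context whitespace onto the continuation
    n_spaces = len(context) - len(context.rstrip())
    if n_spaces > 0:
        continuation = context[-n_spaces:] + continuation
        context = context[:-n_spaces]
    return context, continuation

print(split_pair("Question: 2 + 2 = ", "4"))  # ('Question: 2 + 2 =', ' 4')
```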
lm-evaluation/build/lib/lm_eval/models/neuron_optimum.py ADDED
@@ -0,0 +1,736 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import subprocess
5
+ from collections import defaultdict
6
+ from typing import List, Optional, Union
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import transformers
11
+ from packaging import version
12
+ from tqdm import tqdm
13
+ from transformers import GenerationConfig
14
+ from transformers.generation import StoppingCriteriaList
15
+
16
+ import lm_eval.models.utils
17
+ from lm_eval import utils
18
+ from lm_eval.api.model import TemplateLM
19
+ from lm_eval.api.registry import register_model
20
+ from lm_eval.models.utils import stop_sequences_criteria
21
+
22
+
23
+ try:
24
+ NEURON_AVAILABLE = True
25
+ from optimum.neuron import NeuronModelForCausalLM
26
+ from optimum.neuron.generation import TokenSelector
27
+ from optimum.neuron.version import __version__ as optimum_neuron_version
28
+ except ImportError:
29
+ NeuronModelForCausalLM = object
30
+ NEURON_AVAILABLE = False
31
+
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+
36
+ def get_nc_count() -> Union[int, None]:
37
+ """Returns the number of neuron cores on the current instance."""
38
+ try:
39
+ cmd = "neuron-ls --json-output"
40
+ result = subprocess.run(cmd, shell=True, capture_output=True)
41
+ print(f"inferring nc_count from `neuron-ls` {result.stdout}")
42
+ json_output = json.loads(result.stdout)
43
+ count = sum([x["nc_count"] for x in json_output])
44
+ print(f"nc_count={count}")
45
+ return count
46
+ except Exception:
47
+ return None
48
+
49
+
+ def wrap_constant_batch_size(func):
+     def _decorator(self, input_ids):
+         """input_ids is a 2D tensor with batch_size on dim=0.
+
+         Makes sure `func` always runs with self.batch_size rows.
+         """
+         batch_size = input_ids.shape[0]
+
+         if batch_size < self.batch_size:
+             # handle the event of input_ids.shape[0] != batch_size
+             # Neuron cores expect constant batch_size
+             input_ids = torch.concat(
+                 (
+                     input_ids,
+                     # add missing_batch_size dummy
+                     torch.zeros(
+                         [self.batch_size - batch_size, *input_ids.size()[1:]],
+                         dtype=input_ids.dtype,
+                         device=input_ids.device,
+                     ),
+                 ),
+                 dim=0,
+             )
+         elif batch_size > self.batch_size:
+             raise ValueError(
+                 f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})"
+             )
+         # return the forward pass that requires constant batch size
+         return func(self, input_ids)[:batch_size]
+
+     return _decorator
+
+
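+ # Behavioral sketch of the decorator above (hypothetical shapes): with a model whose
+ # static batch size is 4, a call on a (3, 16) tensor is zero-padded to (4, 16) before
+ # the forward pass, and only the first 3 rows of the result are returned, so callers
+ # never see the padding rows.
+
+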
+ class CustomNeuronModelForCausalLM(NeuronModelForCausalLM):
+     """NeuronModelForCausalLM with `stopping_criteria` in `generate`"""
+
+     def generate(
+         self,
+         input_ids: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         stopping_criteria: Optional["StoppingCriteriaList"] = None,
+         generation_config: Optional["GenerationConfig"] = None,
+         **kwargs,
+     ) -> torch.LongTensor:
+         r"""
+         A streamlined generate() method overriding the transformers.GenerationMixin.generate() method.
+
+         This method uses the same logits processors/warpers and stopping criteria as the transformers library
+         `generate()` method but restricts the generation to greedy search and sampling.
+
+         It does not support transformers `generate()` advanced options.
+
+         Please refer to https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationMixin.generate
+         for details on generation configuration.
+
+         Parameters:
+             input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
+                 The sequence used as a prompt for the generation.
+             attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Mask to avoid performing attention on padding token indices.
+             generation_config (`~transformers.generation.GenerationConfig`, *optional*):
+                 The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+                 passed to generate matching the attributes of `generation_config` will override them. If
+                 `generation_config` is not provided, the default will be used, with the following loading
+                 priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+                 configuration. Please note that unspecified parameters will inherit [`~transformers.generation.GenerationConfig`]'s
+                 default values, whose documentation should be checked to parameterize generation.
+
+         Returns:
+             `torch.LongTensor`: the generated token ids, sliced back to the caller's batch size.
+         """
+         # The actual generation configuration is a combination of config and parameters
+         generation_config = copy.deepcopy(
+             self.generation_config if generation_config is None else generation_config
+         )
+         model_kwargs = generation_config.update(
+             **kwargs
+         )  # All unused kwargs must be model kwargs
+         # Check model kwargs are actually used by either prepare_inputs_for_generation or forward
+         self._validate_model_kwargs(model_kwargs)
+
+         # Instantiate a TokenSelector for the specified configuration
+         selector = TokenSelector.create(
+             input_ids, generation_config, self, self.max_length
+         )
+         selector.stopping_criteria.append(stopping_criteria)
+         # Verify that the inputs are compatible with the model static input dimensions
+         batch_size, sequence_length = input_ids.shape
+         if sequence_length > self.max_length:
+             raise ValueError(
+                 f"The input sequence length ({sequence_length}) exceeds the model static sequence length ({self.max_length})"
+             )
+         padded_input_ids = input_ids
+         padded_attention_mask = attention_mask
+         if batch_size > self.batch_size:
+             raise ValueError(
+                 f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})"
+             )
+         elif batch_size < self.batch_size:
+             logger.warning(
+                 "Inputs will be padded to match the model static batch size. This will increase latency."
+             )
+             padding_shape = [self.batch_size - batch_size, sequence_length]
+             padding = torch.full(
+                 padding_shape, fill_value=self.config.eos_token_id, dtype=torch.int64
+             )
+             padded_input_ids = torch.cat([input_ids, padding])
+             if attention_mask is not None:
+                 padding = torch.zeros(padding_shape, dtype=torch.int64)
+                 padded_attention_mask = torch.cat([attention_mask, padding])
+         # Drop the current generation context and clear the Key/Value cache
+         self.reset_generation()
+
+         output_ids = self.generate_tokens(
+             padded_input_ids,
+             selector,
+             batch_size,
+             attention_mask=padded_attention_mask,
+             **model_kwargs,
+         )
+         return output_ids[:batch_size, :]
+
+
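+ # Typical invocation (sketch; values are examples, flags follow the harness CLI):
+ #
+ #     lm_eval --model neuronx \
+ #         --model_args pretrained=TinyLlama/TinyLlama-1.1B-Chat-v1.0,tp_degree=2,dtype=float16 \
+ #         --tasks lambada_openai
+
+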
+ @register_model("neuronx")
+ class NEURON_HF(TemplateLM):
+     """
+     Enables usage on AWS Neuron
+     using the HuggingFace Transformers + Transformers neuronx library.
+     Tested with neuron 2.17.0
+     """
+
+     _DEFAULT_MAX_LENGTH = 2048
+
+     def __init__(
+         self,
+         pretrained: Optional[str] = "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+         revision: Optional[str] = "main",
+         tp_degree: Optional[int] = None,
+         subfolder: Optional[str] = None,
+         tokenizer: Optional[str] = None,
+         truncation: Optional[bool] = False,
+         max_length: Optional[int] = None,
+         dtype: Optional[Union[str, torch.dtype]] = "auto",
+         batch_size: Optional[int] = 1,
+         low_cpu_mem_usage: Optional[bool] = True,
+         trust_remote_code: Optional[bool] = False,
+         use_fast_tokenizer: Optional[bool] = True,
+         add_bos_token: Optional[bool] = False,
+     ) -> None:
+         if not NEURON_AVAILABLE:
+             raise Exception(
+                 "Tried to load a neuron model, but neuron is not installed. "
+                 'Please install it via `pip install "optimum[neuronx]"` '
+                 "and make sure you are running on an AWS inf2 instance."
+             )
+         if version.parse(optimum_neuron_version) != version.parse("0.0.17"):
+             logger.warning(
+                 '`optimum-neuron` model requires `pip install "optimum[neuronx]>=0.0.17"` '
+                 "preferably using the Hugging Face Neuron Deep Learning AMI (Ubuntu 22.04) "
+                 "https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2 "
+                 f"You are using optimum-neuron={optimum_neuron_version}"
+             )
+         super().__init__()
+
+         assert isinstance(pretrained, str)
+         assert isinstance(batch_size, (int, str))
+
+         self.batch_size_per_gpu = int(batch_size)
+         batch_size = int(batch_size)
+         if tp_degree is None:
+             # execute `neuron-ls --json-output | jq '.[0].nc_count'`
+             # to get the number of neuron cores on your instance
+             tp_degree = get_nc_count()
+
+         assert isinstance(tp_degree, int), (
+             f"model_args must include tp_degree. tp_degree must be set to an integer,"
+             f" but is tp_degree=`{tp_degree}` with type=`{type(tp_degree)}`."
+             "Set it to the number of neuron cores on your instance."
+             " For inf2.xlarge and inf2.8xlarge, set it to `2`."
+             " For inf2.24xlarge, set it to `12`."
+             " For inf2.48xlarge, set it to `24`."
+         )
+
+         # TODO: update this to be less of a hack once subfolder is fixed in HF
+         revision = revision + ("/" + subfolder if subfolder is not None else "")
+
+         self._config = transformers.AutoConfig.from_pretrained(
+             pretrained,
+             revision=revision,
+             trust_remote_code=trust_remote_code,
+         )
+         torch_dtype = lm_eval.models.utils.get_dtype(dtype)
+
+         assert torch_dtype in [
+             torch.float16,
+             torch.bfloat16,
+         ], "Only float16 and bfloat16 are supported"
+
+         self.tokenizer = transformers.AutoTokenizer.from_pretrained(
+             pretrained if tokenizer is None else tokenizer,
+             revision=revision,
+             trust_remote_code=trust_remote_code,
+             use_fast=use_fast_tokenizer,
+         )
+
+         # Neuron specific code
+         if torch_dtype == torch.float16:
+             self.amp_dtype = "f16"
+         elif torch_dtype == torch.bfloat16:
+             self.amp_dtype = "bf16"
+         elif torch_dtype == torch.float32:
+             self.amp_dtype = "f32"
+         else:
+             raise NotImplementedError("Only float16 and bfloat16 are implemented.")
+
+         compiler_args = {"num_cores": tp_degree, "auto_cast_type": self.amp_dtype}
+         input_shapes = {
+             "batch_size": batch_size,
+             "sequence_length": self._DEFAULT_MAX_LENGTH,
+         }
+
+         print(
+             f"{'='*20} \n loading model to neuron with"
+             f" {compiler_args}, {input_shapes}..."
+         )
+         self.model = CustomNeuronModelForCausalLM.from_pretrained(
+             pretrained,
+             revision=revision,
+             trust_remote_code=trust_remote_code,
+             low_cpu_mem_usage=low_cpu_mem_usage,
+             export=True,
+             **compiler_args,
+             **input_shapes,
+         )
+         print(f"SUCCESS: neuron model compiled. \n {'='*20}")
+
+         self.truncation = truncation
+
+         self.vocab_size = self.tokenizer.vocab_size
+         self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
+         self.add_bos_token = add_bos_token
+
+         self._max_length = max_length
+
+         self.batch_schedule = 1
+         self.batch_sizes = {}
+
+     @property
+     def config(self):
+         # return the associated transformers.AutoConfig for the given pretrained model.
+         return self._config
+
+     @property
+     def eot_token_id(self):
+         # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
+         return self.tokenizer.eos_token_id
+
+     @property
+     def prefix_token_id(self):
+         # it is used as prefix for loglikelihood
+         return self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
+
+     @property
+     def max_length(self):
+         if self._max_length:  # if max length manually set, return it
+             return self._max_length
+         seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
+         for attr in seqlen_config_attrs:
+             if hasattr(self.model.config, attr):
+                 return getattr(self.model.config, attr)
+         if hasattr(self.tokenizer, "model_max_length"):
+             if self.tokenizer.model_max_length == 1000000000000000019884624838656:
+                 return self._DEFAULT_MAX_LENGTH
+             return self.tokenizer.model_max_length
+         return self._DEFAULT_MAX_LENGTH
+
+     @property
+     def max_gen_toks(self) -> int:
+         return 256
+
+     @property
+     def batch_size(self):
+         return self.batch_size_per_gpu
+
+     @property
+     def device(self):
+         """device are neuron cores, but the created tensors are on CPU."""
+         return "cpu"
+
+     @property
+     def rank(self):
+         return 0
+
+     @property
+     def world_size(self):
+         return 1
+
+     def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None):
+         """Encode a string, optionally left-truncating the result."""
+         if add_special_tokens is None:
+             add_special_tokens = False or self.add_bos_token
+
+         encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
+
+         # left-truncate the encoded context to be at most `left_truncate_len` tokens long
+         if left_truncate_len:
+             encoding = encoding[-left_truncate_len:]
+
+         return encoding
+
+     def tok_batch_encode(
+         self,
+         strings: List[str],
+         padding_side: str = "left",
+         left_truncate_len: int = None,
+         truncation: bool = False,
+     ):
+         # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
+         old_padding_side = self.tokenizer.padding_side
+         self.tokenizer.padding_side = padding_side
+
+         add_special_tokens = False or self.add_bos_token
+
+         encoding = self.tokenizer(
+             strings,
+             truncation=truncation,
+             padding="longest",
+             return_tensors="pt",
+             add_special_tokens=add_special_tokens,
+         )
+         if left_truncate_len:
+             encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
+             encoding["attention_mask"] = encoding["attention_mask"][
+                 :, -left_truncate_len:
+             ]
+         self.tokenizer.padding_side = old_padding_side
+
+         return encoding["input_ids"], encoding["attention_mask"]
+
+     def tok_decode(self, tokens):
+         return self.tokenizer.decode(tokens)
+
+     @wrap_constant_batch_size
+     def _model_call(self, input_ids: torch.Tensor):
+         """
+         get logits for the entire sequence
+
+         :param input_ids: torch.Tensor
+             A torch tensor of shape [batch, sequence]
+             the size of sequence may vary from call to call
+         :return
+             A torch tensor of shape [batch, sequence, vocab] with the
+             logits returned from the model's decoder-lm head
+         """
+         _, sequence_length = input_ids.shape
+
+         with torch.inference_mode():
+             cache_ids = torch.arange(0, sequence_length, dtype=torch.int32).split(1)
+             input_ids_split = input_ids.split(1, dim=1)
+
+             return torch.concat(
+                 [
+                     self.model.forward(
+                         input_ids=input_id, cache_ids=cache_id, return_dict=False
+                     )[0]
+                     for input_id, cache_id in zip(input_ids_split, cache_ids)
+                 ],
+                 dim=1,
+             )
+
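+     # Note on _model_call above: the compiled Neuron graph decodes one position at a
+     # time, so the sequence is fed token-by-token with explicit cache_ids (KV-cache
+     # positions) and the per-step logits are concatenated back into a
+     # [batch, sequence, vocab] tensor, matching what a single dense forward pass
+     # would return.
+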
+     def _model_generate(self, context, max_length, stop, **generation_kwargs):
+         # we require users to pass do_sample=True explicitly
+         # for non-greedy gen. This should be reevaluated when considering beam search.
+
+         with torch.inference_mode():
+             if "do_sample" not in generation_kwargs.keys():
+                 generation_kwargs["do_sample"] = False
+
+             stopping_criteria = stop_sequences_criteria(
+                 self.tokenizer,
+                 stop + [self.tokenizer.decode([self.config.eos_token_id])],
+                 1,
+                 context.shape[0],
+             )
+
+             return self.model.generate(
+                 input_ids=context,
+                 max_length=max_length,
+                 stopping_criteria=stopping_criteria,
+                 pad_token_id=self.eot_token_id,
+                 use_cache=True,
+                 **generation_kwargs,
+             )
+
+     def _select_cont_toks(self, logits, contlen=None, inplen=None):
+         assert (
+             contlen and inplen
+         ), "Must pass input len and cont. len to select scored logits for causal LM"
+         # discard right-padding.
+         # also discard the input/context tokens. we'll only score continuations.
+         logits = logits[inplen - contlen : inplen]
+
+         return logits
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         loglikelihoods = []
+
+         adaptive_batch_size = None
+
+         for (string,) in tqdm(
+             [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
+         ):
+             rolling_token_windows = list(
+                 map(
+                     utils.make_disjoint_window,
+                     utils.get_rolling_token_windows(
+                         token_list=self.tok_encode(string),
+                         prefix_token=self.prefix_token_id,
+                         max_seq_len=self.max_length,
+                         context_len=1,
+                     ),
+                 )
+             )
+
+             # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
+             rolling_token_windows = [(None,) + x for x in rolling_token_windows]
+
+             pad_amnt = 0
+             if self.world_size > 1:
+                 # We pad out the external document-level iterator so the inner iterator doesn't hang
+                 mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
+                 gathered = (
+                     self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
+                 )
+
+                 pad_amnt = max(gathered) - gathered[self.rank]
+                 if pad_amnt > 0:
+                     rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
+
+             string_nll = self._loglikelihood_tokens(
+                 rolling_token_windows,
+                 disable_tqdm=True,
+                 override_bs=adaptive_batch_size,
+             )
+
+             if (self.world_size > 1) and (pad_amnt > 0):
+                 string_nll = [x[0] for x in string_nll[:-pad_amnt]]
+             else:
+                 # discard is_greedy
+                 string_nll = [x[0] for x in string_nll]
+
+             string_nll = sum(string_nll)
+             loglikelihoods.append(string_nll)
+
+         return loglikelihoods
+
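+     # Worked example (hypothetical numbers): with max_seq_len=4 and context_len=1,
+     # tokens [5, 6, 7, 8, 9] produce the disjoint windows ([p], [5, 6, 7, 8]) and
+     # ([5, 6, 7, 8], [9]), where p is the prefix token id; each token is scored
+     # exactly once and the per-window log-likelihoods are summed above.
+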
+     def _loglikelihood_tokens(
+         self, requests, disable_tqdm: bool = False, override_bs=None
+     ):
+         # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
+         res = []
+
+         def _collate(x):
+             # the negative sign on len(toks) sorts descending - this has a few advantages:
+             # - time estimates will always be over not underestimates, which is more useful for planning
+             # - to know the size of a batch when going through the list, you know the first one is always the batch
+             #   padded context length. this is useful to simplify the batching logic and more importantly to make
+             #   automatic adaptive batches much much easier to implement
+             # - any OOMs will happen right away rather than near the end
+
+             toks = x[1] + x[2]
+             return -len(toks), tuple(toks)
+
+         re_ord = utils.Reorderer(requests, _collate)
+
+         n_reordered_requests = len(re_ord.get_reordered())  # noqa
+         # automatic (variable) batch size detection for vectorization
+         # pull longest context sample from request
+
+         chunks = lm_eval.models.utils.chunks(
+             re_ord.get_reordered(),
+             n=self.batch_size,
+             fn=None,
+         )
+
+         for chunk in tqdm(chunks, disable=(disable_tqdm or (self.rank != 0))):
+             inps = []
+             cont_toks_list = []
+             inplens = []
+
+             conts = []  # noqa
+             encoder_attns = []  # noqa
+
+             padding_len_inp = None
+             padding_len_cont = None  # noqa
+             # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
+             # tensors, then we pack them together into a batch, call the model, and then pick it all apart
+             # again because vectorizing is annoying
+
+             for _, context_enc, continuation_enc in chunk:
+                 # sanity check
+                 assert len(context_enc) > 0
+                 assert len(continuation_enc) > 0
+                 assert len(continuation_enc) <= self.max_length
+
+                 # how this all works (illustrated on a causal decoder-only setup):
+                 #          CTX      CONT
+                 # inp    0 1 2 3|4 5 6 7 8 9   <- last token is deleted by inp[:, :-1]
+                 # model  \               \
+                 # logits   1 2 3|4 5 6 7 8 9   <- the ctx half gets tossed out by the
+                 # cont_toks      4 5 6 7 8 9      [:, -len(continuation_enc):, :self.vocab_size] slice
+
+                 # when too long to fit in context, truncate from the left
+                 inp = torch.tensor(
+                     (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
+                     dtype=torch.long,
+                     device=self.device,
+                 )
+                 (inplen,) = inp.shape
+
+                 padding_len_inp = (
+                     max(padding_len_inp, inplen)
+                     if padding_len_inp is not None
+                     else inplen
+                 )
+
+                 inps.append(inp)  # [1, inp_length]
+                 cont_toks_list.append(continuation_enc)
+                 inplens.append(inplen)
+
+             # create encoder attn mask and batched conts, if seq2seq
+             call_kwargs = {}
+             batched_inps = lm_eval.models.utils.pad_and_concat(
+                 padding_len_inp, inps, padding_side="right"
+             )  # [batch, padding_len_inp]
+
+             multi_logits = F.log_softmax(
+                 self._model_call(batched_inps, **call_kwargs), dim=-1
+             )  # [batch, padding_length (inp or cont), vocab]
+
+             for (cache_key, _, _), logits, inplen, cont_toks in zip(
+                 chunk, multi_logits, inplens, cont_toks_list
+             ):
+                 # Slice to original seq length
+                 contlen = len(cont_toks)
+                 # take only logits in the continuation
+                 # (discard context toks if decoder-only ; discard right-padding)
+                 # also discards + checks for "virtual tokens" in the causal LM's input window
+                 # from prompt/prefix tuning tokens, if applicable
+                 ctx_len = inplen + (logits.shape[0] - padding_len_inp)
+                 logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
+                 logits = logits.unsqueeze(0)  # [1, seq, vocab]
+
+                 # Check if per-token argmax is exactly equal to continuation
+                 greedy_tokens = logits.argmax(dim=-1)
+                 cont_toks = torch.tensor(
+                     cont_toks, dtype=torch.long, device=self.device
+                 ).unsqueeze(0)  # [1, seq]
+                 max_equal = (greedy_tokens == cont_toks).all()
+
+                 # Obtain log-probs at the corresponding continuation token indices
+                 # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
+                 logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
+                     -1
+                 )  # [1, seq]
+
+                 # Answer: (log prob, is-exact-match)
+                 answer = (float(logits.sum()), bool(max_equal))
+
+                 res.append(answer)
+
+                 self.cache_hook.add_partial("loglikelihood", cache_key, answer)
+
+         return re_ord.get_original(res)
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         res = defaultdict(list)
+         re_ords = {}
+
+         def _collate(x):
+             # the negative sign on len(toks) sorts descending - this has a few advantages:
+             # - time estimates will always be over not underestimates, which is more useful for planning
+             # - to know the size of a batch when going through the list, you know the first one is always the batch
+             #   padded context length. this is useful to simplify the batching logic and more importantly to make
+             #   automatic adaptive batches much much easier to implement
+             # - any OOMs will happen right away rather than near the end
+             toks = self.tok_encode(x[0])
+             return -len(toks), x[0]
+
+         # we group requests by their generation_kwargs,
+         # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
+         # in the same batch.
+         grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
+         for key, reqs in grouper.get_grouped().items():
+             # within each set of reqs for given kwargs, we reorder by token length, descending.
+             re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate)
+
+         pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
+
+         # for each different set of kwargs, we execute all requests, by batch.
+         for key, re_ord in re_ords.items():
+             chunks = lm_eval.models.utils.chunks(
+                 re_ord.get_reordered(), n=self.batch_size
+             )
+             for chunk in tqdm(chunks, disable=self.rank != 0):
+                 contexts, all_gen_kwargs = zip(*chunk)
+                 # we assume all gen kwargs in the batch are the same
+                 # this is safe to assume because the `grouper` object ensures it.
+                 gen_kwargs = all_gen_kwargs[0]
+                 # unpack our keyword arguments.
+                 until = None
+                 if isinstance(gen_kwargs, dict):
+                     kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
+                     if "until" in kwargs.keys():
+                         until = kwargs.pop("until")
+                         if isinstance(until, str):
+                             until = [until]
+                         elif not isinstance(until, list):
+                             raise ValueError(
+                                 f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
+                             )
+                 else:
+                     raise ValueError(
+                         f"Expected `gen_kwargs` to be of type `dict` but got {gen_kwargs}"
+                     )
+                 # add EOS token to stop sequences
+                 eos = self.tok_decode(self.eot_token_id)
+                 if not until:
+                     until = [eos]
+                 else:
+                     until.append(eos)
+                 if "max_gen_toks" in kwargs.keys():
+                     max_gen_toks = kwargs.pop("max_gen_toks")
+                 else:
+                     max_gen_toks = self.max_gen_toks
+                 # first stop sequence is used to halt generation upon encountering
+                 primary_until = [until[0]]
+
+                 max_ctx_len = self.max_length - max_gen_toks
+
+                 # encode, pad, and truncate contexts for this batch
+                 context_enc, attn_masks = self.tok_batch_encode(
+                     contexts,
+                     left_truncate_len=max_ctx_len,
+                     truncation=self.truncation,
+                 )
+                 context_enc = context_enc.to(self.device)
+                 attn_masks = attn_masks.to(self.device)
+
+                 if "max_length" not in kwargs:
+                     kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
+
+                 # perform batched generation
+                 cont = self._model_generate(
+                     context=context_enc,
+                     attention_mask=attn_masks,
+                     stop=primary_until,
+                     **kwargs,
+                 )
+
+                 cont_toks_list = cont.tolist()
+                 for cont_toks, context in zip(cont_toks_list, contexts):
+                     # discard context + left-padding toks if using causal decoder-only LM
+                     cont_toks = cont_toks[context_enc.shape[1] :]
+
+                     s = self.tok_decode(cont_toks)
+
+                     # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
+                     for term in until:
+                         if len(term) > 0:
+                             # ignore '' separator,
+                             # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
+                             s = s.split(term)[0]
+
+                     res[key].append(s)
+
+                     self.cache_hook.add_partial(
+                         "generate_until", (context, gen_kwargs), s
+                     )
+                     pbar.update(1)
+             # reorder this group of results back to original unsorted form
+             res[key] = re_ord.get_original(res[key])
+
+         pbar.close()
+
+         return grouper.get_original(res)
lm-evaluation/build/lib/lm_eval/models/openai_completions.py ADDED
@@ -0,0 +1,481 @@
+ import copy
+ import os
+ from collections import defaultdict
+ from importlib.util import find_spec
+ from typing import List, Literal, Optional, Tuple
+
+ from tqdm import tqdm
+
+ import lm_eval.models.utils
+ from lm_eval import utils
+ from lm_eval.api.model import LM, TemplateLM
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.utils import retry_on_specific_exceptions
+ from lm_eval.utils import eval_logger
+
+
+ def get_result(response, ctxlen: int) -> Tuple[float, bool]:
+     """Process results from OpenAI API response.
+
+     :param response: dict
+         OpenAI API Response
+     :param ctxlen: int
+         Length of context (so we can slice them away and only keep the predictions)
+     :return:
+         continuation_logprobs: float
+             Sum of the log probabilities of the continuation tokens
+         is_greedy: bool
+             whether argmax matches given continuation exactly
+     """
+     is_greedy = True
+     logprobs = response.logprobs.token_logprobs
+     continuation_logprobs = sum(logprobs[ctxlen:])
+
+     for i in range(ctxlen, len(response.logprobs.token_logprobs)):
+         token = response.logprobs.tokens[i]
+         top_tokens = response.logprobs.top_logprobs[i]
+         top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
+         if top_token != token:
+             is_greedy = False
+             break
+
+     return continuation_logprobs, is_greedy
+
+
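+ # Response shape assumed by get_result (sketch): each completions choice carries
+ # OpenAI-style logprobs with parallel per-token lists, roughly
+ #     resp.logprobs.tokens         == [" France", " is", ...]
+ #     resp.logprobs.token_logprobs == [-0.31, -1.204, ...]
+ #     resp.logprobs.top_logprobs   == [{" France": -0.31, " the": -2.7}, ...]
+ # which is what the greedy check above iterates over.
+
+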
+ def oa_completion(client, chat: bool = False, **kwargs):
+     """Query OpenAI API for completion.
+
+     Retry with back-off until they respond.
+     """
+     if not find_spec("openai") or not find_spec("tiktoken"):
+         raise Exception(
+             "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
+             "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`"
+         )
+     else:
+         import openai
+
+     def _exception_callback(e: Exception, sleep_time: float) -> None:
+         import traceback
+
+         traceback.print_exc()
+
+     @retry_on_specific_exceptions(
+         on_exceptions=[openai.OpenAIError],
+         max_retries=None,  # retry forever, consider changing
+         on_exception_callback=_exception_callback,
+     )
+     def completion():
+         if chat:
+             return client.chat.completions.create(**kwargs)
+         else:
+             return client.completions.create(**kwargs)
+
+     return completion()
+
+
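+ # Usage sketch (hypothetical values): both model classes below funnel their API
+ # traffic through this helper, e.g.
+ #
+ #     response = oa_completion(client=client, model="gpt-3.5-turbo-instruct",
+ #                              prompt=inps, echo=True, max_tokens=0, logprobs=10)
+ #
+ # retrying on openai.OpenAIError with back-off until the API responds.
+
+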
+ @register_model("openai-completions", "local-completions")
+ class OpenaiCompletionsLM(TemplateLM):
+     _DEFAULT_MAX_LENGTH = 2048
+
+     def __init__(
+         self,
+         model: str,
+         base_url: str = None,
+         tokenizer: Optional[str] = None,
+         tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken",
+         truncate: bool = False,
+         max_gen_toks: int = 256,
+         batch_size: int = 1,
+         seed: int = 1234,
+         max_length: Optional[int] = None,
+     ) -> None:
+         """
+
+         :param model: str
+             OpenAI API model name (e.g. gpt-3.5-turbo-instruct)
+         :param truncate: bool
+             Truncate input if too long (if False and input is too long, throw error)
+         """
+         super().__init__()
+         self.seed = seed
+         try:
+             import openai  # noqa: E401
+             import tiktoken
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
+                 'Please install these via `pip install lm-eval[openai]` or `pip install -e ."[openai]"`',
+             )
+         self.model = model
+         self.base_url = base_url
+         self.tokenizer_backend = tokenizer_backend
+         self.truncate = truncate
+         self._batch_size = int(batch_size)
+         self._max_gen_toks = max_gen_toks
+         self._max_length = max_length
+
+         # if we have a local model, use HF tokenizer over tiktoken
+         if self.tokenizer_backend == "huggingface":
+             import transformers  # noqa: E401
+
+             self.tokenizer = transformers.AutoTokenizer.from_pretrained(
+                 tokenizer if tokenizer else self.model
+             )
+             self.vocab_size = self.tokenizer.vocab_size
+             self.end_of_text_token_id = self.tokenizer.eos_token_id
+         elif self.tokenizer_backend == "tiktoken":
+             if self.base_url:
+                 eval_logger.warning(
+                     f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. "
+                     "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken."
+                 )
+
+             self.tokenizer = tiktoken.encoding_for_model(self.model)
+             self.vocab_size = self.tokenizer.n_vocab
+             self.end_of_text_token_id = self.tokenizer.eot_token
+         else:
+             raise ValueError(
+                 f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}"
+             )
+
+         # Read from environment variable OPENAI_API_KEY
+         # Set to EMPTY for local
+         openai.api_key = os.environ["OPENAI_API_KEY"]
+         if self.base_url:
+             self.client = openai.OpenAI(base_url=self.base_url)
+         else:
+             self.client = openai.OpenAI()
+
+     @property
+     def eot_token_id(self):
+         return self.end_of_text_token_id
+
+     @property
+     def max_length(self) -> int:
+         if self._max_length:
+             return self._max_length
+         else:
+             return self._DEFAULT_MAX_LENGTH
+
+     @property
+     def max_gen_toks(self) -> int:
+         return self._max_gen_toks
+
+     @property
+     def batch_size(self) -> int:
+         return self._batch_size
+
+     @property
+     def device(self):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     def tok_encode(self, string: str, **kwargs) -> List[int]:
+         return self.tokenizer.encode(string)
+
+     def tok_decode(self, tokens: List[int]) -> str:
+         return self.tokenizer.decode(tokens)
+
+     def _loglikelihood_tokens(
+         self, requests, disable_tqdm: bool = False
+     ) -> List[Tuple[float, bool]]:
+         res = []
+
+         def _collate(x):
+             # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because
+             # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations
+             # we care about, and so we need some kind of backup for when it isn't
+             toks = x[1] + x[2]
+             return -len(toks), tuple(toks)
+
+         re_ord = utils.Reorderer(requests, _collate)
+
+         for chunk in tqdm(
+             list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
+             disable=disable_tqdm,
+         ):
+             inps = []
+             ctxlens = []
+             for cache_key, context_enc, continuation_enc in chunk:
+                 # max_length+1 because the API takes up to 2049 tokens, including the first context token
+                 inp = (context_enc + continuation_enc)[-(self.max_length + 1) :]
+                 # TODO: the logic is much simpler if we just look at the length of continuation tokens
+                 ctxlen = len(context_enc) - max(
+                     0, len(context_enc) + len(continuation_enc) - (self.max_length + 1)
+                 )
+
+                 inps.append(inp)
+                 ctxlens.append(ctxlen)
+
+             response = oa_completion(
+                 client=self.client,
+                 model=self.model,
+                 prompt=inps,
+                 echo=True,
+                 max_tokens=0,
+                 temperature=0.0,
+                 logprobs=10,
+                 seed=self.seed,
+             )
+
+             for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
+                 response.choices, ctxlens, chunk
+             ):
+                 answer = get_result(resp, ctxlen)
+
+                 res.append(answer)
+
+                 # partial caching
+                 if cache_key is not None:
+                     self.cache_hook.add_partial("loglikelihood", cache_key, answer)
+         return re_ord.get_original(res)
+
+     def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
+         if not requests:
+             return []
+         res = []
+         requests = [req.args for req in requests]
+
+         def _collate(x):
+             toks = self.tok_encode(x[0])
+             return len(toks), x[0]
+
+         re_ord = utils.Reorderer(requests, _collate)
+
+         def sameuntil_chunks(xs, size):
+             ret = []
+             lastuntil = xs[0][1]
+             for x in xs:
+                 if len(ret) >= size or x[1] != lastuntil:
+                     yield ret, lastuntil
+                     ret = []
+                     lastuntil = x[1]
+                 ret.append(x)
+
+             if ret:
+                 yield ret, lastuntil
+
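+         # Example (illustrative): with batch_size=2 and reordered requests whose args are
+         # (ctx1, {"until": ["\n"]}), (ctx2, {"until": ["\n"]}), (ctx3, {"until": ["."]}),
+         # sameuntil_chunks yields ([r1, r2], {"until": ["\n"]}) then ([r3], {"until": ["."]}),
+         # so every API call below shares a single `stop` argument.
+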
+         # todo: more intelligent batching for heterogeneous `until`
+         for chunk, request_args in tqdm(
+             list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)),
+             disable=disable_tqdm,
+         ):
+             inps = []
+             self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks)
+             for context, _ in chunk:
+                 context_enc = self.tok_encode(context)
+                 inp = context_enc[-(self.max_length - self.max_gen_toks) :]
+                 inps.append(inp)
+
+             until = request_args.get("until", ["<|endoftext|>"])
+             request_args["temperature"] = request_args.get("temperature", 0)
+
+             response = oa_completion(
+                 client=self.client,
+                 model=self.model,
+                 prompt=inps,
+                 max_tokens=self.max_gen_toks,
+                 stop=until,
+                 seed=self.seed,
+                 **{
+                     k: v
+                     for k, v in request_args.items()
+                     if k not in {"do_sample", "max_gen_toks", "until"}
+                 },
+             )
+             for resp, (context, args_) in zip(response.choices, chunk):
+                 s = getattr(resp, "text")
+
+                 until_ = until
+
+                 for term in until_:
+                     if len(term) > 0:
+                         s = s.split(term)[0]
+
+                 # partial caching
+                 self.cache_hook.add_partial(
+                     "generate_until", (context, {"until": until_}), s
+                 )
+
+                 res.append(s)
+         return re_ord.get_original(res)
+
+     def _model_call(self, inps):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     def _model_generate(self, context, max_length, eos_token_id):
+         # Isn't used because we override generate_until
+         raise NotImplementedError()
+
+     def loglikelihood_rolling(
+         self, requests, disable_tqdm: bool = False
+     ) -> List[float]:
+         loglikelihoods = []
+
+         for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
+             rolling_token_windows = list(
+                 map(
+                     utils.make_disjoint_window,
+                     utils.get_rolling_token_windows(
+                         token_list=self.tok_encode(string),
+                         prefix_token=self.eot_token_id,
+                         max_seq_len=self.max_length,
+                         context_len=1,
+                     ),
+                 )
+             )
+
+             # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
+             rolling_token_windows = [(None,) + x for x in rolling_token_windows]
+
+             string_nll = self._loglikelihood_tokens(
+                 rolling_token_windows,
+                 disable_tqdm=True,
+             )
+
+             # discard is_greedy
+             string_nll = [x[0] for x in string_nll]
+
+             string_nll = sum(string_nll)
+             loglikelihoods.append(string_nll)
+         return loglikelihoods
+
+
+ @register_model("openai-chat-completions", "local-chat-completions")
+ class OpenaiChatCompletionsLM(LM):
+     def __init__(
+         self,
+         model: str = "gpt-3.5-turbo",  # GPT model or Local model using HuggingFace model paths
+         base_url: str = None,
+         truncate: bool = False,
+         **kwargs,
+     ) -> None:
+         """
+
+         :param model: str
+             Implements an OpenAI-style chat completion API for
+             accessing both OpenAI OR locally-hosted models using
+             HuggingFace Tokenizer
+             OpenAI API model (e.g. gpt-3.5-turbo)
+             using the **gen_kwargs passed on init
+         :param truncate: bool
+             Truncate input if too long (if False and input is too long, throw error)
+         """
+         super().__init__()
+         try:
+             import openai  # noqa: E401
+         except ModuleNotFoundError:
+             raise Exception(
+                 "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
+                 "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
+             )
+         self.model = model
+         self.base_url = base_url
+         self.truncate = truncate
+
+         # Read from environment variable OPENAI_API_KEY
+         # Set to EMPTY for local
+         if self.base_url:
+             self.client = openai.OpenAI(base_url=self.base_url)
+         else:
+             self.client = openai.OpenAI()  # openai.AsyncOpenAI()
+
+     @property
+     def max_length(self) -> int:
+         # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
+         return 2048
+
+     @property
+     def max_gen_toks(self) -> int:
+         return 256
+
+     @property
+     def batch_size(self):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     @property
+     def device(self):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
+         res = defaultdict(list)
+         re_ords = {}
+
+         # we group requests by their generation_kwargs,
+         # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
+         # in the same batch.
+         grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
+         for key, reqs in grouper.get_grouped().items():
+             # within each set of reqs for given kwargs, we reorder by token length, descending.
+             re_ords[key] = utils.Reorderer(
+                 [req.args for req in reqs], lambda x: (-len(x[0]), x[0])
+             )
+
+         pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
+         for key, re_ord in re_ords.items():
+             # n needs to be 1 because messages in
+             # chat completion are not batched; each
+             # is regarded as a single conversation.
+             chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1)
+             for chunk in chunks:
+                 contexts, all_gen_kwargs = zip(*chunk)
+                 inps = [{"role": "user", "content": context} for context in contexts]
+
+                 gen_kwargs = all_gen_kwargs[0]
+                 until = None
+                 if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict):
+                     if "do_sample" in kwargs.keys():
+                         kwargs.pop("do_sample")
+                     if "until" in kwargs.keys():
+                         until = kwargs.pop("until")
+                         if isinstance(until, str):
+                             until = [until]
+                         elif not isinstance(until, list):
+                             raise ValueError(
+                                 f"Expected `kwargs['until']` to be of type Union[str, list] but got {until}"
+                             )
+                     kwargs["stop"] = until
+                     kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks)
+                 else:
+                     raise ValueError(
+                         f"Expected `kwargs` to be of type `dict` but got {kwargs}"
+                     )
+
+                 response = oa_completion(
+                     client=self.client,
+                     chat=True,
+                     messages=inps,
+                     model=self.model,
+                     **kwargs,
+                 )
+
+                 for resp, (context, args_) in zip(response.choices, chunk):
+                     s = resp.message.content
+
+                     if until is not None:
+                         for term in until:
+                             if len(term) > 0:
+                                 s = s.split(term)[0]
+
+                     res[key].append(s)
+
+                     self.cache_hook.add_partial(
+                         "generate_until", (context, {"until": until}), s
+                     )
+                     pbar.update(1)
+             # reorder this group of results back to original unsorted form
+             res[key] = re_ord.get_original(res[key])
+
+         pbar.close()
+
+         return grouper.get_original(res)
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError("No support for logits.")
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         raise NotImplementedError("No support for logits.")
lm-evaluation/build/lib/lm_eval/models/optimum_lm.py ADDED
@@ -0,0 +1,69 @@
+ from importlib.util import find_spec
+ from pathlib import Path
+
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.huggingface import HFLM
+
+
+ @register_model("openvino")
+ class OptimumLM(HFLM):
+     """
+     Optimum Intel provides a simple interface to optimize Transformer models and convert them to \
+     OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
+     Intel® architectures using OpenVINO™ runtime.
+     """
+
+     def __init__(
+         self,
+         device="cpu",
+         **kwargs,
+     ) -> None:
+         if "backend" in kwargs:
+             # optimum currently only supports causal models
+             assert (
+                 kwargs["backend"] == "causal"
+             ), "Currently, only OVModelForCausalLM is supported."
+
+         self.openvino_device = device
+
+         super().__init__(
+             device=self.openvino_device,
+             backend=kwargs.pop("backend", "causal"),
+             **kwargs,
+         )
+
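+     # Typical invocation (sketch; values are examples): a directory that already
+     # contains `openvino_model.xml` is loaded as-is, while a plain HF checkpoint is
+     # converted on the fly via `export=True` in `_create_model` below, e.g.
+     #
+     #     lm_eval --model openvino --model_args pretrained=./ov_model_dir --tasks lambada_openai
+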
+     def _create_model(
+         self,
+         pretrained: str,
+         revision="main",
+         dtype="auto",
+         trust_remote_code=False,
+         **kwargs,
+     ) -> None:
+         if not find_spec("optimum"):
+             raise Exception(
+                 "package `optimum` is not installed. Please install it via `pip install optimum[openvino]`"
+             )
+         else:
+             from optimum.intel.openvino import OVModelForCausalLM
+
+         model_kwargs = kwargs if kwargs else {}
+         model_file = Path(pretrained) / "openvino_model.xml"
+         if model_file.exists():
+             export = False
+         else:
+             export = True
+         model_kwargs["ov_config"] = {
+             "PERFORMANCE_HINT": "LATENCY",
+             "NUM_STREAMS": "1",
+             "CACHE_DIR": "",
+         }
+
+         self._model = OVModelForCausalLM.from_pretrained(
+             pretrained,
+             revision=revision,
+             trust_remote_code=trust_remote_code,
+             export=export,
+             device=self.openvino_device.upper(),
+             **model_kwargs,
+         )
lm-evaluation/build/lib/lm_eval/models/textsynth.py ADDED
@@ -0,0 +1,171 @@
+ """ TextSynth API
+ Implementation provided by Fabrice Bellard:
+     https://github.com/EleutherAI/lm-evaluation-harness/issues/295
+
+ In order to use the API, you must have a valid TextSynth account and
+ enough credits.
+
+ Example usage:
+
+     python main.py --model textsynth --model_args engine=gptj_6B --no_cache --tasks piqa
+
+ Homepage: https://textsynth.com/index.html
+ """
+ import logging
+ import os
+
+ import requests as _requests
+ from tqdm import tqdm
+
+ from lm_eval.api.model import LM
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.utils import retry_on_specific_exceptions
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def textsynth_completion(**kwargs):
+     """Query TextSynth API for completion.
+     Retry with back-off until they respond.
+     """
+
+     def _exception_callback(e: Exception, sleep_time: float) -> None:
+         import traceback
+
+         traceback.print_exc()
+
+     @retry_on_specific_exceptions(
+         on_exceptions=[_requests.exceptions.RequestException],
+         max_retries=None,  # retry forever, consider changing
+         on_exception_callback=_exception_callback,
+     )
+     def completion():
+         return _requests.post(**kwargs)
+
+     return completion()
+
+
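+ # Usage sketch (hypothetical values): the methods below POST to the engine endpoints
+ # used in this file, e.g.
+ #
+ #     textsynth_completion(
+ #         url="https://api.textsynth.com/v1/engines/gptj_6B/logprob",
+ #         headers={"Authorization": "Bearer <TEXTSYNTH_API_SECRET_KEY>"},
+ #         json={"context": "Paris is the capital of", "continuation": " France"},
+ #     )
+ #
+ # retrying on requests.exceptions.RequestException with back-off.
+
+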
+ @register_model("textsynth")
+ class TextSynthLM(LM):
+     def __init__(self, engine, truncate: bool = False, **kwargs) -> None:
+         """
+         :param engine: str
+             TextSynth API engine (e.g. `gptj_6B`)
+         :param truncate: bool
+             Truncate input if too long (if False and input is too long, throw error)
+         """
+         super().__init__()
+
+         self.engine = engine
+         self.truncate = truncate
+         self.api_url = "https://api.textsynth.com"
+         # Read from environment variable TEXTSYNTH_API_SECRET_KEY
+         self.api_key = os.environ["TEXTSYNTH_API_SECRET_KEY"]
+
+     @property
+     def eot_token_id(self):
+         # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
+         raise NotImplementedError()
+
+     @property
+     def max_length(self) -> int:
+         # NOTE: Turn on truncation to avoid errors on long inputs.
+         return 2048
+
+     @property
+     def max_gen_toks(self) -> int:
+         return 256
+
+     @property
+     def batch_size(self):
+         # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
+         raise NotImplementedError()
+
+     @property
+     def device(self):
+         # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
+         raise NotImplementedError()
+
+     def tok_encode(self, string: str):
+         # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
+         raise NotImplementedError()
+
+     def tok_decode(self, tokens):
+         # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
+         raise NotImplementedError()
+
+     def loglikelihood(self, requests, disable_tqdm: bool = False):
+         res = []
+         for context, continuation in tqdm(requests, disable=disable_tqdm):
+             response = textsynth_completion(
+                 url=self.api_url + "/v1/engines/" + self.engine + "/logprob",
+                 headers={"Authorization": "Bearer " + self.api_key},
+                 json={"context": context, "continuation": continuation},
+             )
+             resp = response.json()
+             if "logprob" in resp:
+                 logprob = resp["logprob"]
+                 is_greedy = resp["is_greedy"]
+                 res.append((logprob, is_greedy))
+
+                 self.cache_hook.add_partial(
+                     "loglikelihood", (context, continuation), (logprob, is_greedy)
+                 )
+             else:
+                 logger.error(
+                     f"The following response does not contain `logprobs`. Got:\n{resp}"
+                 )
+                 assert False
+         return res
+
+     def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
+         # TODO: The TextSynth API does not support tokenized inputs so we cannot
+         # manually partition long contexts into smaller rolling windows as
+         # done for other models derived from `BaseLM`. Override this method
+         # with a windowing scheme that works for direct string inputs.
+         raise NotImplementedError(
+             "`loglikelihood_rolling` is currently not supported due to lack of "
+             "input tokenization support from TextSynth."
+         )
+
+     def generate_until(self, requests, disable_tqdm: bool = False):
+         if not requests:
+             return []
+
+         res = []
+         for request in tqdm(requests, disable=disable_tqdm):
+             inp = request[0]
+             request_args = request[1]
+             until = request_args["until"]
+             response = textsynth_completion(
+                 url=self.api_url + "/v1/engines/" + self.engine + "/completions",
+                 headers={"Authorization": "Bearer " + self.api_key},
+                 json={
+                     "prompt": inp,
+                     "max_tokens": self.max_gen_toks,
+                     "top_k": 1,
+                     "stop": until,
+                 },
+             )
+             resp = response.json()
+             if "text" in resp:
+                 s = resp["text"]
+                 res.append(s)
+
+                 self.cache_hook.add_partial("generate_until", (inp, request_args), s)
+             else:
+                 logger.error(
+                     "The following response does not contain generated `text`. "
+                     f"Got:\n{resp}"
+                 )
+                 assert False
+         return res
+
+     def _model_call(self, inps):
+         # Isn't used because we override _loglikelihood_tokens
+         raise NotImplementedError()
+
+     def _model_generate(self, context, max_length, eos_token_id):
+         # Isn't used because we override generate_until
+         raise NotImplementedError()
lm-evaluation/build/lib/lm_eval/models/utils.py ADDED
@@ -0,0 +1,615 @@
+ import collections
+ import fnmatch
+ import gc
+ import itertools
+ import time
+ from functools import wraps
+ from typing import (
+     Any,
+     Callable,
+     Dict,
+     Iterable,
+     Iterator,
+     List,
+     Literal,
+     Optional,
+     Tuple,
+     Type,
+     Union,
+ )
+
+ import torch
+ import transformers
+
+ from lm_eval.utils import eval_logger
+
+
+ def chunks(iter, n: int = 0, fn=None):
+     """
+     Divides an iterable into chunks of specified size or based on a given function.
+     Useful for batching
+
+     Parameters:
+     - iter: The input iterable to be divided into chunks.
+     - n: An integer representing the size of each chunk. Default is 0.
+     - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None.
+
+     Returns:
+     An iterator that yields chunks of the input iterable.
+
+     Example usage:
+     ```
+     data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+     for chunk in chunks(data, 3):
+         print(chunk)
+     ```
+     Output:
+     ```
+     [1, 2, 3]
+     [4, 5, 6]
+     [7, 8, 9]
+     [10]
+     ```
+     """
+     arr = []
+     for i, x in enumerate(iter):
+         arr.append(x)
+         if len(arr) == (fn(i, iter) if fn else n):
+             yield arr
+             arr = []
+
+     if arr:
+         yield arr
+
+
+ class MultiChoice:
+     def __init__(self, choices) -> None:
+         self.choices = choices
+
+     # Simple wildcard support (linux filename patterns)
+     def __contains__(self, values) -> bool:
+         for value in values.split(","):
+             if len(fnmatch.filter(self.choices, value)) == 0:
+                 eval_logger.info("Available tasks to choose:")
+                 for choice in self.choices:
+                     eval_logger.info(f"  - {choice}")
+                 raise ValueError("'{}' is not in task list".format(value))
+         return True
+
+     def __iter__(self) -> Iterator:
+         for choice in self.choices:
+             yield choice
+
+
+ class Grouper:
+     """
+     takes an array `arr` and function `fn` and returns a dictionary
+     with keys fn(ob) for each ob in `arr` and with values `self.arr[key]` a list of all
+     objects in `arr` satisfying `key == fn(ob)`.
+     """
+
+     def __init__(self, arr, fn) -> None:
+         # self.orig_arr = arr
+         self.size = len(arr)
+         arr = list(enumerate(arr))
+
+         def group_return_dict(arr, fn):
+             res = collections.defaultdict(list)
+
+             for ob in arr:
+                 res[fn(ob)].append(ob)
+             return res
+
+         arr = group_return_dict(arr, lambda x: fn(x[1]))
+
+         # self.arr has format Dict[key, List[Tuple[int, <entry from orig. arr>]]]
+         self.arr = arr
+         self._grouped = None
+
+     def get_grouped(self):
+         # return the contents but not indices for our grouped dict.
+         if self._grouped:
+             return self._grouped
+         grouped = {}
+         for key in self.arr.keys():
+             # drop the index from each element of self.arr
+             grouped[key] = [y[1] for y in self.arr[key]]
+         self._grouped = grouped
+         return grouped
+
+     def get_original(self, grouped_dict):
+         # take in a grouped dictionary with e.g. results for each key listed
+         # in the same order as the instances in `self.arr`, and
+         # return the results in the same (single list) order as `self.orig_arr`.
+         res = [None] * self.size
+         cov = [False] * self.size
+         # orig = [None] * self.size
+
+         assert grouped_dict.keys() == self.arr.keys()
+
+         for key in grouped_dict.keys():
+             for (ind, _), v in zip(self.arr[key], grouped_dict[key]):
+                 res[ind] = v
+                 cov[ind] = True
+                 # orig[ind] = _
+
+         assert all(cov)
+         # assert orig == self.orig_arr
+
+         return res
+
+
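+ # Example (illustrative): Grouper([3, -1, 4], fn=lambda x: x >= 0) groups the array as
+ # {True: [3, 4], False: [-1]}; get_original({True: ["a", "b"], False: ["c"]}) then
+ # returns ["a", "c", "b"], restoring the original ordering.
+
+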
+ def pad_and_concat(
+     max_length: int,
+     tensors: List[torch.Tensor],
+     padding_side: Literal["right", "left"] = "right",
+ ):
+     """
+     Method for padding a list of tensors given the maximum tensor
+     length in the batch. Used for batching inputs and continuations in
+     seq2seq models.
+     """
+     assert (
+         padding_side == "left" or padding_side == "right"
+     ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'"
+
+     for i, tensor in enumerate(tensors):
+         if len(tensor.shape) == 2:
+             tensor = tensor.squeeze(0)  # squeeze, in case passed [1, seq] size
+         tensor_len = tensor.shape[0]
+         if tensor_len < max_length:
+             if padding_side == "right":
+                 # right-pad
+                 tensors[i] = torch.cat(
+                     [
+                         tensor,  # [seq]
+                         torch.zeros(
+                             max_length - tensor_len,
+                             dtype=torch.long,
+                             device=tensor.device,
+                         ),  # [padding_length - seq]
+                     ],
+                     dim=0,
+                 ).unsqueeze(0)
+             else:
+                 # left-pad
+                 tensors[i] = torch.cat(
+                     [
+                         torch.zeros(
+                             max_length - tensor_len,
+                             dtype=torch.long,
+                             device=tensor.device,
+                         ),  # [padding_length - seq]
+                         tensor,  # [seq]
+                     ],
+                     dim=0,
+                 ).unsqueeze(0)
+         else:
+             tensors[i] = tensor.unsqueeze(0)
+
+     return torch.cat(tensors, dim=0)
+
+
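+ # Example (illustrative): pad_and_concat(5, [torch.ones(3, dtype=torch.long),
+ # torch.ones(5, dtype=torch.long)], padding_side="right") returns a (2, 5) batch whose
+ # first row is [1, 1, 1, 0, 0]; shorter rows are zero-padded up to the batch maximum.
+
+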
193
+ def clear_torch_cache() -> None:
194
+ gc.collect()
195
+ torch.cuda.empty_cache()
196
+
197
+
198
+ def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
199
+ """Converts `dtype` from `str` to torch.dtype when possible. Does not use an instantiated HF AutoConfig"""
200
+ if isinstance(dtype, str) and dtype != "auto":
201
+ # Convert `str` args torch dtype: `float16` -> `torch.float16`
202
+ _torch_dtype = getattr(torch, dtype)
203
+ else:
204
+ _torch_dtype = dtype
205
+ return _torch_dtype
206
+
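A few hypothetical sanity checks; note that the string "auto" passes through unchanged, so despite the annotation the return value is not always a `torch.dtype`:

```python
# Hypothetical quick checks of get_dtype's behavior.
assert get_dtype("bfloat16") is torch.bfloat16    # str -> torch.dtype
assert get_dtype(torch.float32) is torch.float32  # dtype passes through
assert get_dtype("auto") == "auto"                # "auto" stays a str
```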
207
+
+ class MultiTokenEOSCriteria(transformers.StoppingCriteria):
+     """Criteria to stop on the specified multi-token sequence."""
+
+     def __init__(
+         self,
+         sequence: str,
+         tokenizer: transformers.PreTrainedTokenizer,
+         initial_decoder_input_length: int,
+         batch_size: int,
+     ) -> None:
+         self.initial_decoder_input_length = initial_decoder_input_length
+         self.done_tracker = [False] * batch_size
+         self.sequence = sequence
+         self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
+         # print(sequence, self.sequence_ids)
+         # we look back for 2 more tokens than it takes to encode our stop sequence
+         # because tokenizers suck, and a model might generate `['\n', '\n']` but our `sequence` is `['\n\n']`
+         # and we don't want to mistakenly not stop a generation because our
+         # (string) stop sequence was output in a different tokenization
+
+         # NOTE: there is a minor danger that this will end up looking back 2 tokens into the past, into the inputs to the model,
+         # and stopping generation immediately as a result. With only 2 extra tokens of lookback, this risk is minimized
+         # Additionally, in lookback_ids_batch we should prevent ever looking back into the inputs as described.
+         self.sequence_id_len = len(self.sequence_ids) + 2
+         self.tokenizer = tokenizer
+
+     def __call__(self, input_ids, scores, **kwargs) -> bool:
+         # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence
+         lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :]
+
+         lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len :]
+
+         lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)
+
+         for i, done in enumerate(self.done_tracker):
+             if not done:
+                 self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
+         return False not in self.done_tracker
+
+
+ def stop_sequences_criteria(
+     tokenizer: transformers.PreTrainedTokenizer,
+     stop_sequences: List[str],
+     initial_decoder_input_length: int,
+     batch_size: int,
+ ) -> transformers.StoppingCriteriaList:
+     return transformers.StoppingCriteriaList(
+         [
+             *[
+                 MultiTokenEOSCriteria(
+                     sequence, tokenizer, initial_decoder_input_length, batch_size
+                 )
+                 for sequence in stop_sequences
+             ],
+         ]
+     )
+
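A hedged sketch of how these criteria plug into Hugging Face generation (the model name and prompt are illustrative, not taken from this commit):

```python
# Hypothetical usage with a Hugging Face causal LM; "gpt2" is illustrative.
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")

context = tokenizer("Question: What is 2+2?\nAnswer:", return_tensors="pt").input_ids
criteria = stop_sequences_criteria(
    tokenizer,
    stop_sequences=["\n\n", "Question:"],
    initial_decoder_input_length=context.shape[1],
    batch_size=context.shape[0],
)
output = model.generate(context, stopping_criteria=criteria, max_new_tokens=64)
```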
265
+
+ def undistribute(iterable):
+     """
+     Undoes https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.distribute .
+
+     Re-interleaves results that have been split using more_itertools.distribute:
+         >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+         >>> list(group_1)
+         [1, 3, 5]
+         >>> list(group_2)
+         [2, 4, 6]
+         >>> undistribute([group_1, group_2])
+         [1, 2, 3, 4, 5, 6]
+
+     Handles non-uniform component lengths:
+
+         >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+         >>> [list(c) for c in children]
+         [[1, 4, 7], [2, 5], [3, 6]]
+         >>> undistribute(children)
+         [1, 2, 3, 4, 5, 6, 7]
+
+     Also handles when some iterables are empty:
+
+         >>> children = distribute(5, [1, 2, 3])
+         >>> [list(c) for c in children]
+         [[1], [2], [3], [], []]
+         >>> undistribute(children)
+         [1, 2, 3]
+
+     """
+
+     return [
+         x
+         for x in itertools.chain.from_iterable(
+             itertools.zip_longest(*[list(x) for x in iterable])
+         )
+         if x is not None
+     ]
+
+
+ def retry_on_specific_exceptions(
+     on_exceptions: List[Type[Exception]],
+     max_retries: Optional[int] = None,
+     backoff_time: float = 3.0,
+     backoff_multiplier: float = 1.5,
+     on_exception_callback: Optional[Callable[[Exception, float], Any]] = None,
+ ):
+     """Retry on an LLM Provider's rate limit error with exponential backoff
+     For example, to use for OpenAI, do the following:
+     ```
+     from openai import RateLimitError
+
+     # Recommend specifying max_retries to avoid infinite loops!
+     @retry_on_specific_exceptions([RateLimitError], max_retries=3)
+     def completion(...):
+         # Wrap OpenAI completion function here
+         ...
+     ```
+     """
+
+     def decorator(func: Callable):
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             sleep_time = backoff_time
+             attempt = 0
+             while max_retries is None or attempt < max_retries:
+                 try:
+                     return func(*args, **kwargs)
+                 except tuple(on_exceptions) as e:
+                     if on_exception_callback is not None:
+                         on_exception_callback(e, sleep_time)
+                     time.sleep(sleep_time)
+                     sleep_time *= backoff_multiplier
+                     attempt += 1
+
+         return wrapper
+
+     return decorator
+
345
+
+ class Collator:
+     """
+     A class for reordering and batching elements of an array.
+
+     This class allows for sorting an array based on a provided sorting function, grouping elements based on a grouping function, and generating batches from the sorted and grouped data.
+
+     Objects of this class have the group_by attribute, which determines the method for grouping
+     the data while batching it. The three options are "gen_kwargs", "contexts", or None:
+         If group_by == "gen_kwargs", requests will be grouped by gen_kwargs.
+         If group_by == "contexts", requests will be grouped by context + cont[:-1].
+         If None, requests will just be reordered by length descending.
+     """
+
+     def __init__(
+         self,
+         arr: List,
+         sort_fn: Callable = lambda x: x,
+         group_fn: Callable = lambda x: x[1],
+         group_by: Union[Literal["gen_kwargs", "contexts"], None] = None,
+     ) -> None:
+         self._group_by = group_by
+         # 0 indices are enumerated indices. Apply functions to original arr.
+         self._sort_fn = lambda x: sort_fn(x[1])
+         self._group_fn = lambda x: group_fn(x[1])
+         self._reorder_indices: List = []
+         self._size = len(arr)
+         self._arr_with_indices: Union[Dict, Tuple[Tuple[int, Any], ...]] = tuple(
+             enumerate(arr)
+         )  # [indices, (arr)]
+         if self._group_by == "contexts":
+             self._group_by_context()
+         elif self._group_by == "gen_kwargs":
+             self._group_by_index()
+
+     def _group_by_index(self) -> None:
+         """Group the elements of a list based on their indices."""
+         self._arr_with_indices = self.group(
+             self._arr_with_indices, fn=self._group_fn, group_by="gen_kwargs"
+         )
+
+     def _group_by_context(self) -> None:
+         """Group the array with indices by context."""
+         self._arr_with_indices = self.group(
+             self._arr_with_indices, fn=self._group_fn, group_by="contexts"
+         )
+
+     def get_batched(self, n: int = 1, batch_fn: Optional[Callable] = None) -> Iterator:
+         """
+         Generates and yields batches from the reordered array. The method of grouping and batching
+         depends on the parameter `group_by`.
+         If `group_by` is set to "gen_kwargs", it will batch the
+         re-ordered values with same gen_kwargs for each batch.
+         If `group_by` is "contexts", it caches the requests by context before batching.
+         If `group_by` is neither "gen_kwargs" nor "contexts", it yields the reordered array.
+
+         Parameters:
+         - n (int): The size of each batch. Defaults to 1.
+         - batch_fn ([Callable[[int, Iterable], int]] | None): A function to determine the size of
+           each batch. Optional, defaults to None.
+
+         Returns:
+         Iterator: An iterator over batches of reordered elements grouped as per the `group_by`
+                   attribute.
+
+         Yields:
+         List of batched elements according to the `group_by` attribute.
+         """
+         if self._group_by == "gen_kwargs":
+             for (
+                 key,
+                 values,
+             ) in self._arr_with_indices.items():  # type: ignore
+                 values = self._reorder(values)
+                 batch = self.get_chunks(values, n=n, fn=batch_fn)
+                 yield from batch
+         elif self._group_by == "contexts":
+             # Get one sample from each key
+             values = self._reorder(
+                 [value[0] for value in self._arr_with_indices.values()]
+             )
+             batch = self.get_chunks(values, n=n, fn=batch_fn)
+             yield from batch
+         else:
+             values = self._reorder(self._arr_with_indices)  # type: ignore
+             batch = self.get_chunks(values, n=n, fn=batch_fn)
+             yield from batch
+
+     def get_cache(
+         self,
+         req_str: Tuple[str, str] = None,
+         cxt_toks: List[int] = None,
+         cont_toks: List[int] = None,
+         logits: torch.Tensor = None,
+     ) -> Iterator[Tuple[Tuple[str, str], List[int], torch.Tensor]]:
+         """
+         Retrieves cached single-token continuations and their associated arguments, updating indices as necessary.
+
+         The behavior of this function varies depending on how the `group_by` attribute is set:
+
+         - When `group_by` is "contexts":
+             The function identifies single-token continuations by checking for keys that equate to
+             [context+continuation][-1] and logs the indices for re-ordering.
+             In this mode, this function can work in two scenarios:
+
+             1. Cache Hit - Single Match:
+                 If a single matching context-continuation pair is found in the cache,
+                 the function yields the original arguments.
+
+             2. Cache Hit - Multiple Matches:
+                 If multiple matching context-continuation pairs are found in the cache,
+                 the function expands the logits batch dimension to match the number of cache hits.
+                 It updates the original requests and continuation tokens.
+
+         - When `group_by` is not set to "contexts":
+             This method yields the original arguments, logits and continuation tokens,
+             without checking for one-token continuations.
+
+         Parameters:
+         - req_str (tuple[str, str]): Original strings used for CachingLM.
+         - cxt_toks (list[int]): Full context tokens used for lookup.
+         - cont_toks (list[int]): Continuation tokens for which logits were generated.
+         - logits (torch.Tensor [1, seq_length, vocab_size]): Logits generated by the model given context and continuation keys.
+
+         Yields:
+         - Iterator:
+             - req_str (tuple[str, str]): strings used for CachingLM.
+             - cont_toks (list[int]): continuation tokens.
+             - logits (torch.Tensor [1, seq_length, vocab_size]): The original logits (repeated cache hit times)
+         """
+         if self._group_by == "contexts":
+             cache_hit: List[
+                 Tuple[int, Tuple[Tuple[str, str], List[int], List[int]]]
+             ] = self._arr_with_indices.pop(tuple(cxt_toks + cont_toks[:-1]))
+             if (cache_size := len(cache_hit)) == 1:
+                 self._reorder_indices.extend(x[0] for x in cache_hit)
+                 yield req_str, cont_toks, logits
+             else:
+                 # If we have matching requests then expand the batch dimension (no-op) and
+                 # yield each along with its corresponding args.
+                 multilogits = logits.expand(cache_size, -1, -1).chunk(cache_size)
+                 indices, req_str, cont_toks = zip(
+                     *[(x[0], x[1][0], x[-1][-1]) for x in cache_hit]
+                 )
+                 self._reorder_indices.extend(indices)
+                 for c_key, cont_tok, logit in zip(req_str, cont_toks, multilogits):
+                     yield c_key, cont_tok, logit
+         else:
+             yield req_str, cont_toks, logits
+
+     def _reorder(self, arr: Union[List, Tuple[Tuple[int, Any], ...]]) -> Iterator:
+         """
+         Reorders the elements in the array based on the sorting function.
+
+         Parameters:
+         - arr (list | tuple[tuple[int, Any], ...]): The array or iterable to be reordered.
+
+         Yields:
+         Iterator
+         """
+         arr = sorted(arr, key=self._sort_fn)
+         if not self._group_by == "contexts":
+             # If grouped by contexts then indices will be set in get_cache()
+             self._reorder_indices.extend([x[0] for x in arr])
+         yield from [x[1] for x in arr]
+
+     def get_original(self, newarr: List) -> List:
+         """
+         Restores the original order of elements from the reordered list.
+
+         Parameters:
+         - newarr (list): The reordered array.
+
+         Returns:
+         list: The array with elements restored to their original order.
+         """
+         res = [None] * self._size
+         cov = [False] * self._size
+
+         for ind, v in zip(self._reorder_indices, newarr):
+             res[ind] = v
+             cov[ind] = True
+
+         assert all(cov)
+
+         return res
+
+     def __len__(self):
+         return self._size
+
+     @staticmethod
+     def group(
+         arr: Iterable,
+         fn: Callable,
+         group_by: Literal["gen_kwargs", "contexts"] = "gen_kwargs",
+     ) -> dict:
+         """
+         Groups elements of an iterable based on a provided function.
+
+         The `group_by` parameter determines the method of grouping.
+         If `group_by` is "contexts", the elements are grouped by [context + cont][:-1].
+         If `group_by` is "gen_kwargs", the elements are grouped based on the gen_kwargs dict.
+
+         Parameters:
+         - arr (Iterable): The iterable to be grouped.
+         - fn (Callable): The function to determine the grouping.
+         - group_by (Literal["gen_kwargs", "contexts"]): The method of grouping. Defaults to "gen_kwargs".
+
+         Returns:
+         dict: A dictionary mapping each group key to the list of grouped elements.
+         """
+         res = collections.defaultdict(list)
+         for ob in arr:
+             # where ob == [context + cont]
+             if group_by == "contexts":
+                 res[tuple(fn(ob))].append(ob)
+             else:
+                 try:
+                     hashable_dict = tuple(
+                         (
+                             key,
+                             tuple(value)
+                             if isinstance(value, collections.abc.Iterable)
+                             else value,
+                         )
+                         for key, value in sorted(fn(ob).items())
+                     )
+                     res[hashable_dict].append(ob)
+                 except (TypeError, AttributeError):
+                     res[tuple(fn(ob))].append(ob)
+         return res
+
+     @staticmethod
+     def get_chunks(_iter, n: int = 0, fn=None):
+         """
+         Divides an iterable into chunks of specified size or based on a given function.
+         Useful for batching.
+
+         Parameters:
+         - _iter: The input iterable to be divided into chunks.
+         - n: An integer representing the size of each chunk. Default is 0.
+         - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None.
+
+         Returns:
+         An iterator that yields chunks of the input iterable.
+
+         Example usage:
+         ```
+         data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+         for chunk in Collator.get_chunks(data, 3):
+             print(chunk)
+         ```
+         Output:
+         ```
+         [1, 2, 3]
+         [4, 5, 6]
+         [7, 8, 9]
+         [10]
+         ```
+         """
+         arr = []
+         _iter = tuple(_iter)
+         for i, x in enumerate(_iter):
+             arr.append(x)
+             if len(arr) == (fn(i, _iter) if fn else n):
+                 yield arr
+                 arr = []
+
+         if arr:
+             yield arr
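A minimal round-trip sketch of `Collator` with no grouping (hypothetical data; the `.upper()` call stands in for a batched model call):

```python
# Hypothetical round trip: sort by length descending, batch, "run the model",
# then restore the original request order.
reqs = ["bb", "a", "dddd", "ccc"]
collator = Collator(reqs, sort_fn=lambda s: -len(s))

results = []
for batch in collator.get_batched(n=2):   # [["dddd", "ccc"], ["bb", "a"]]
    results.extend(s.upper() for s in batch)

assert collator.get_original(results) == ["BB", "A", "DDDD", "CCC"]
```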
lm-evaluation/build/lib/lm_eval/models/vllm_causallms.py ADDED
@@ -0,0 +1,487 @@
+ import copy
+ from importlib.metadata import version
+ from importlib.util import find_spec
+ from typing import List, Literal, Optional, Tuple, Union
+
+ from more_itertools import distribute
+ from packaging.version import parse as parse_version
+ from tqdm import tqdm
+
+ from lm_eval.api.instance import Instance
+ from lm_eval.api.model import TemplateLM
+ from lm_eval.api.registry import register_model
+ from lm_eval.models.utils import Collator, undistribute
+ from lm_eval.utils import (
+     eval_logger,
+     get_rolling_token_windows,
+     make_disjoint_window,
+ )
+
+
+ try:
+     import ray
+     from vllm import LLM, SamplingParams
+     from vllm.transformers_utils.tokenizer import get_tokenizer
+ except ModuleNotFoundError:
+     pass
+
+ eval_logger = eval_logger
+
+
+ @register_model("vllm")
+ class VLLM(TemplateLM):
+     _DEFAULT_MAX_LENGTH = 2048
+
+     def __init__(
+         self,
+         pretrained="gpt2",
+         dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
+         revision: Optional[str] = None,
+         trust_remote_code: Optional[bool] = False,
+         tokenizer: Optional[str] = None,
+         tokenizer_mode: Literal["auto", "slow"] = "auto",
+         tokenizer_revision: Optional[str] = None,
+         add_bos_token: Optional[bool] = False,
+         prefix_token_id: Optional[int] = None,
+         tensor_parallel_size: int = 1,
+         quantization: Optional[str] = None,
+         max_gen_toks: int = 256,
+         swap_space: int = 4,
+         batch_size: Union[str, int] = 1,
+         max_batch_size=None,
+         max_length: int = None,
+         max_model_len: int = None,
+         seed: int = 1234,
+         gpu_memory_utilization: float = 0.9,
+         device: str = "cuda",
+         data_parallel_size: int = 1,
+         **kwargs,
+     ):
+         super().__init__()
+
+         if not find_spec("vllm"):
+             raise Exception(
+                 "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
+                 "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
+             )
+
+         assert "cuda" in device or device is None, "vLLM only supports CUDA"
+         assert (
+             max_length is None or max_model_len is None
+         ), "Either max_length or max_model_len may be provided, but not both"
+
+         self._max_length = max_model_len if max_model_len is not None else max_length
+         self.tensor_parallel_size = int(tensor_parallel_size)
+         self.data_parallel_size = int(data_parallel_size)
+         self.model_args = {
+             "model": pretrained,
+             "gpu_memory_utilization": float(gpu_memory_utilization),
+             "revision": revision,
+             "dtype": dtype,
+             "tokenizer": tokenizer,
+             "tokenizer_mode": tokenizer_mode,
+             "tokenizer_revision": tokenizer_revision,
+             "trust_remote_code": trust_remote_code,
+             "tensor_parallel_size": int(tensor_parallel_size),
+             "max_model_len": int(self._max_length) if self._max_length else None,
+             "swap_space": int(swap_space),
+             "quantization": quantization,
+             "seed": int(seed),
+         }
+         self.model_args.update(kwargs)
+         self.batch_size = (
+             "auto"
+             if isinstance(batch_size, str) and "auto" in batch_size
+             else batch_size
+         )
+         if self.data_parallel_size <= 1:
+             self.model = LLM(**self.model_args)
+         else:
+             assert parse_version(version("vllm")) < parse_version(
+                 "0.3.3"
+             ), "data_parallel is only compatible with vllm < v0.3.3."
+             eval_logger.warning(
+                 "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
+             )
+             self.model_args["worker_use_ray"] = True
+             self.batch_size = "auto"
+             eval_logger.info("Manual batching is not compatible with data parallelism.")
+
+         from transformers import AutoConfig
+
+         self._config = AutoConfig.from_pretrained(
+             pretrained, trust_remote_code=trust_remote_code, revision=revision
+         )
+         self.tokenizer = get_tokenizer(
+             tokenizer if tokenizer else pretrained,
+             tokenizer_mode=tokenizer_mode,
+             trust_remote_code=trust_remote_code,
+             tokenizer_revision=tokenizer_revision,
+         )
+         self.add_bos_token = add_bos_token
+         self.custom_prefix_token_id = prefix_token_id
+         if prefix_token_id is not None:
+             eval_logger.info(
+                 f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
+             )
+
+         self._max_gen_toks = max_gen_toks
+
+     @property
+     def eot_token_id(self):
+         # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
+         return self.tokenizer.eos_token_id
+
+     @property
+     def prefix_token_id(self):
+         # it is used as prefix for loglikelihood
+         if self.custom_prefix_token_id is not None:
+             return self.custom_prefix_token_id
+         if self.tokenizer.bos_token_id is not None:
+             return self.tokenizer.bos_token_id
+         return self.tokenizer.eos_token_id
+
+     @property
+     def max_length(self):
+         if self._max_length:  # if max length manually set, return it
+             return self._max_length
+         if self.data_parallel_size <= 1:
+             return self.model.llm_engine.model_config.max_model_len
+         else:
+             seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
+             for attr in seqlen_config_attrs:
+                 if hasattr(self._config, attr):
+                     return getattr(self._config, attr)
+             if hasattr(self.tokenizer, "model_max_length"):
+                 if self.tokenizer.model_max_length == 1000000000000000019884624838656:
+                     return self._DEFAULT_MAX_LENGTH
+                 return self.tokenizer.model_max_length
+             return self._DEFAULT_MAX_LENGTH
+
+     @property
+     def max_gen_toks(self):
+         return self._max_gen_toks
+
+     def tok_encode(
+         self,
+         string: str,
+         left_truncate_len=None,
+         add_special_tokens=None,
+         truncation=False,
+     ):
+         """ """
+         if not add_special_tokens:
+             add_special_tokens = False or self.add_bos_token
+         encoding = self.tokenizer.encode(
+             string, add_special_tokens=add_special_tokens, truncation=truncation
+         )
+
+         # left-truncate the encoded context to be at most `left_truncate_len` tokens long
+         if left_truncate_len:
+             encoding = encoding[-left_truncate_len:]
+
+         return encoding
+
+     def _model_generate(
+         self,
+         requests: List[List[int]] = None,
+         generate: bool = False,
+         max_tokens: int = None,
+         stop: Optional[List[str]] = None,
+         **kwargs,
+     ):
+         if generate:
+             kwargs = self.modify_gen_kwargs(kwargs)
+             sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
+         else:
+             sampling_params = SamplingParams(
+                 temperature=0, prompt_logprobs=1, max_tokens=1
+             )
+         if self.data_parallel_size > 1:
+             # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote
+             # also seems to only work with decorator and not with ray.remote() fn
+             # see https://github.com/vllm-project/vllm/issues/973
+             # note: this has changed on 0.3.3, and it only works now if num_gpus are set.
+             # but then tensor_parallel breaks
+             @ray.remote
+             def run_inference_one_model(
+                 model_args: dict, sampling_params, requests: List[List[int]]
+             ):
+                 llm = LLM(**model_args)
+                 return llm.generate(
+                     prompt_token_ids=requests, sampling_params=sampling_params
+                 )
+
+             # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
+             # interleaved important to balance context lengths across workers
+             requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
+             inputs = ((self.model_args, sampling_params, req) for req in requests)
+             object_refs = [run_inference_one_model.remote(*x) for x in inputs]
+             results = ray.get(object_refs)
+             # Invoke ray.shutdown() to prevent hang-ups if subsequent calls required.
+             ray.shutdown()
+             # flatten results
+             return undistribute(results)
+
+         outputs = self.model.generate(
+             prompt_token_ids=requests,
+             sampling_params=sampling_params,
+             use_tqdm=True if self.batch_size == "auto" else False,
+         )
+         return outputs
+
+     def loglikelihood_rolling(
+         self, requests: List[Instance], disable_tqdm: bool = False
+     ) -> List[float]:
+         loglikelihoods = []
+
+         for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
+             rolling_token_windows = list(
+                 map(
+                     make_disjoint_window,
+                     get_rolling_token_windows(
+                         token_list=self.tok_encode(string),
+                         prefix_token=self.eot_token_id,
+                         max_seq_len=self.max_length - 1,
+                         context_len=1,
+                     ),
+                 )
+             )
+
+             rolling_token_windows = [(None,) + x for x in rolling_token_windows]
+
+             string_nll = self._loglikelihood_tokens(
+                 rolling_token_windows,
+             )
+
+             # discard is_greedy
+             string_nll = [x[0] for x in string_nll]
+
+             string_nll = sum(string_nll)
+             loglikelihoods.append(string_nll)
+         return loglikelihoods
+
+     def generate_until(
+         self, requests: List[Instance], disable_tqdm: bool = False
+     ) -> List[str]:
+         res = []
+
+         # batch tokenize contexts
+         context, all_gen_kwargs = zip(*(req.args for req in requests))
+         context_encoding = self.tokenizer(context, add_special_tokens=False).input_ids
+         requests = [
+             ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
+         ]
+
+         def _collate_gen(_requests):
+             # the negative sign on len(toks) sorts descending - this has a few advantages:
+             # - time estimates will always be over not underestimates, which is more useful for planning
+             # - to know the size of a batch when going through the list, you know the first one is always the batch
+             #   padded context length. this is useful to simplify the batching logic and more importantly to make
+             #   automatic adaptive batches much much easier to implement
+             # - any OOMs will happen right away rather than near the end
+             return -len(_requests[0][1]), _requests[0][0]
+
+         # we group requests by their generation_kwargs,
+         # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
+         # in the same batch.
+         re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
+         chunks = re_ords.get_batched(
+             n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
+         )
+
+         pbar = tqdm(
+             total=len(requests),
+             disable=(disable_tqdm or (self.rank != 0)),
+             desc="Running generate_until requests",
+         )
+         # for each different set of kwargs, we execute all requests, by batch.
+         for chunk in chunks:
+             context_and_encoding, all_gen_kwargs = zip(*chunk)
+             context, context_encoding = zip(*context_and_encoding)
+             # we assume all gen kwargs in the batch are the same
+             # this is safe to assume because the `grouper` object ensures it.
+             gen_kwargs = all_gen_kwargs[0]
+             # unpack our keyword arguments.
+             until = None
+             if isinstance(gen_kwargs, dict):
+                 kwargs = copy.deepcopy(gen_kwargs)  # edge case for repeats > 1
+                 if "until" in kwargs.keys():
+                     until = kwargs.pop("until")
+                     if isinstance(until, str):
+                         until = [until]
+                     elif not isinstance(until, list):
+                         raise ValueError(
+                             f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
+                         )
+             else:
+                 raise ValueError(
+                     f"Expected `kwargs` to be of type `dict` but got {gen_kwargs}"
+                 )
+             # add EOS token to stop sequences
+             eos = self.tokenizer.decode(self.eot_token_id)
+             if not until:
+                 until = [eos]
+             else:
+                 until.append(eos)
+             if "max_gen_toks" in kwargs.keys():
+                 max_gen_toks = kwargs.pop("max_gen_toks")
+             else:
+                 max_gen_toks = self.max_gen_toks
+
+             # set the max length in tokens of inputs ("context_enc")
+             # max len for inputs = max length, minus room to generate the max new tokens
+             max_ctx_len = self.max_length - max_gen_toks
+             context_encoding = [x[-max_ctx_len:] for x in context_encoding]
+
+             # perform batched generation
+             cont = self._model_generate(
+                 requests=context_encoding,
+                 generate=True,
+                 max_tokens=max_gen_toks,
+                 stop=until,
+                 **kwargs,
+             )
+
+             # cache generations
+             for output, context in zip(cont, context):
+                 generated_text = output.outputs[0].text
+                 res.append(generated_text)
+                 self.cache_hook.add_partial(
+                     "generate_until", (context, gen_kwargs), generated_text
+                 )
+                 pbar.update(1)
+
+         pbar.close()
+         # reorder all group of results back to original unsorted form
+         return re_ords.get_original(res)
+
+     def _loglikelihood_tokens(
+         self,
+         requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
+         disable_tqdm: bool = False,
+     ) -> List[Tuple[float, bool]]:
+         res = []
+
+         def _collate(x):
+             toks = x[1] + x[2]
+             return -len(toks), tuple(toks)
+
+         # Reorder requests by length and batch
+         re_ord = Collator(requests, sort_fn=_collate)
+         chunks = re_ord.get_batched(
+             n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
+         )
+
+         pbar = tqdm(
+             total=len(requests),
+             disable=disable_tqdm,
+             desc="Running loglikelihood requests",
+         )
+         for chunk in chunks:
+             inputs = []
+             ctxlens = []
+             for cache_key, context_enc, continuation_enc in chunk:
+                 inp = (context_enc + continuation_enc)[-(self.max_length) :]
+                 ctxlen = len(context_enc) - max(
+                     0, len(context_enc) + len(continuation_enc) - (self.max_length)
+                 )
+
+                 inputs.append(inp)
+                 ctxlens.append(ctxlen)
+
+             outputs = self._model_generate(requests=inputs, generate=False)
+
+             for output, ctxlen, (cache_key, _, _), inp in zip(
+                 outputs, ctxlens, chunk, inputs
+             ):
+                 answer = self._parse_logprobs(
+                     tokens=inp,
+                     outputs=output,
+                     ctxlen=ctxlen,
+                 )
+
+                 res.append(answer)
+
+                 # partial caching
+                 if cache_key is not None:
+                     self.cache_hook.add_partial("loglikelihood", cache_key, answer)
+                 pbar.update(1)
+         pbar.close()
+         return re_ord.get_original(res)
+
+     @staticmethod
+     def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]:
+         """Process logprobs and tokens.
+
+         :param tokens: list
+             Input tokens (potentially left-truncated)
+         :param outputs: RequestOutput
+             Contains prompt_logprobs
+         :param ctxlen: int
+             Length of context (so we can slice them away and only keep the predictions)
+         :return:
+             continuation_logprobs: float
+                 Log probabilities of continuation tokens
+             is_greedy: bool
+                 Whether argmax matches given continuation exactly
+         """
+
+         # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on.
+         continuation_logprobs_dicts = outputs.prompt_logprobs
+
+         def coerce_logprob_to_num(logprob):
+             # vLLM changed the return type of logprobs from float
+             # to a Logprob object storing the float value + extra data
+             # (https://github.com/vllm-project/vllm/pull/3065).
+             # If we are dealing with vllm's Logprob object, return
+             # the logprob value stored as an attribute. Otherwise,
+             # return the object itself (which should be a float
+             # for older versions of vLLM).
+             return getattr(logprob, "logprob", logprob)
+
+         continuation_logprobs_dicts = [
+             {
+                 token: coerce_logprob_to_num(logprob)
+                 for token, logprob in logprob_dict.items()
+             }
+             if logprob_dict is not None
+             else None
+             for logprob_dict in continuation_logprobs_dicts
+         ]
+
+         # Calculate continuation_logprobs
+         # assume ctxlen always >= 1
+         continuation_logprobs = sum(
+             logprob_dict.get(token)
+             for token, logprob_dict in zip(
+                 tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
+             )
+         )
+
+         # Determine if is_greedy
+         is_greedy = True
+         for token, logprob_dict in zip(
+             tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
+         ):
+             # Get the token with the maximum log probability from the logprob_dict
+             if logprob_dict:  # Ensure the logprob_dict is not None
+                 top_token = max(logprob_dict, key=logprob_dict.get)
+                 if top_token != token:
+                     is_greedy = False
+                     break
+
+         return continuation_logprobs, is_greedy
+
+     @staticmethod
+     def modify_gen_kwargs(kwargs: dict) -> dict:
+         # sampling_params
+         do_sample = kwargs.pop("do_sample", None)
+         if do_sample is False or "temperature" not in kwargs:
+             kwargs["temperature"] = 0.0
+         # hf defaults
+         kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
+         kwargs["spaces_between_special_tokens"] = kwargs.get(
+             "spaces_between_special_tokens", False
+         )
+         return kwargs
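For context, a hedged sketch of driving this backend through the harness's Python API (the model and task names are illustrative, and the exact signature may vary by harness version):

```python
# Hypothetical sketch; assumes lm_eval exposes simple_evaluate at top level.
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args="pretrained=gpt2,gpu_memory_utilization=0.8,dtype=auto",
    tasks=["arc_easy"],
)
```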
lm-evaluation/build/lib/lm_eval/prompts/__init__.py ADDED
@@ -0,0 +1,126 @@
+ import ast
+ import os
+ from typing import Dict
+
+ from lm_eval import utils
+ from lm_eval.utils import eval_logger
+
+
+ # Prompt library.
+ # Stores prompts in a dictionary indexed by 2 levels:
+ # prompt category name, and prompt name.
+ # This allows us to access prompts by category and name.
+ PROMPT_REGISTRY: Dict[str, Dict[str, str]] = {
+     "qa-basic": {
+         "question-newline-answer": "Question: {{question}}\nAnswer:",
+         "q-newline-a": "Q: {{question}}\nA:",
+     },
+ }
+
+
+ def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None):
+     # unpack prompt name
+     category_name, prompt_name = prompt_id.split(":")
+     if subset_name is None:
+         dataset_full_name = dataset_name
+     else:
+         dataset_full_name = f"{dataset_name}-{subset_name}"
+     eval_logger.info(f"Loading prompt from {category_name} for {dataset_full_name}")
+     if category_name == "promptsource":
+         try:
+             from promptsource.templates import DatasetTemplates
+         except ModuleNotFoundError:
+             raise Exception(
+                 "Tried to load a Promptsource template, but promptsource is not installed. "
+                 "Please install promptsource via `pip install lm-eval[promptsource]` or `pip install -e .[promptsource]`"
+             )
+         try:
+             if subset_name is None:
+                 prompts = DatasetTemplates(dataset_name=dataset_name)
+             else:
+                 prompts = DatasetTemplates(
+                     dataset_name=dataset_name, subset_name=subset_name
+                 )
+         except Exception:
+             raise ValueError(f"{dataset_name} and {subset_name} not found")
+         if prompt_name in prompts.all_template_names:
+             return prompts[prompt_name]
+         else:
+             raise ValueError(
+                 f"{prompt_name} not in prompt list {prompts.all_template_names}"
+             )
+     elif ".yaml" in category_name:
+         import yaml
+
+         with open(category_name, "rb") as file:
+             prompt_yaml_file = yaml.full_load(file)
+
+         prompt_string = prompt_yaml_file["prompts"][prompt_name]
+         return PromptString(prompt_string)
+     else:
+         try:
+             return PROMPT_REGISTRY[category_name][prompt_name]
+         except Exception:
+             raise ValueError(
+                 f"expected only a single `:` as separator between \
+                 prompt category and name, but got `{prompt_id}` instead"
+             )
+
+
+ def load_prompt_list(
+     use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs
+ ):
+     category_name, prompt_name = use_prompt.split(":")
+
+     if category_name == "promptsource":
+         from promptsource.templates import DatasetTemplates
+
+         if subset_name is None:
+             prompts = DatasetTemplates(dataset_name=dataset_name)
+         else:
+             prompts = DatasetTemplates(
+                 dataset_name=dataset_name, subset_name=subset_name
+             )
+
+         prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
+
+     elif ".yaml" in category_name:
+         import yaml
+
+         if yaml_path is not None:
+             category_name = os.path.realpath(os.path.join(yaml_path, category_name))
+
+         with open(category_name, "rb") as file:
+             prompt_yaml_file = yaml.full_load(file)
+
+         prompt_list = utils.pattern_match(
+             prompt_name, prompt_yaml_file["prompts"].keys()
+         )
+
+     # category_name, *prompt_name = use_prompt.split(":")
+     # TODO: allow multiple prompt naming
+     # if len(prompt_name) > 1:
+     #     prompt_list = []
+     #     for prompt in prompt_name:
+     #         prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names))
+     # else:
+     #     prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
+     return [":".join([category_name, prompt]) for prompt in prompt_list]
+
+
+ class PromptString:
+     def __init__(self, prompt_string):
+         self.prompt_string = prompt_string
+
+     def apply(self, doc):
+         doc_to_text = self.prompt_string["doc_to_text"]
+         doc_to_target = self.prompt_string["doc_to_target"]
+
+         # TODO need a way to process doc_to_choice
+         if "doc_to_choice" in self.prompt_string:
+             raise Exception("Not yet implemented to accept doc_to_choice")
+
+         text_string = utils.apply_template(doc_to_text, doc)
+         target_string = utils.apply_template(doc_to_target, doc)
+
+         return [text_string, target_string]
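A small sketch of the registry path through `get_prompt` (the document dict is hypothetical):

```python
# Hypothetical sketch: fetch a registry prompt, then render it against a doc,
# the same way PromptString.apply uses utils.apply_template above.
template = get_prompt("qa-basic:q-newline-a")  # -> "Q: {{question}}\nA:"

doc = {"question": "What is the capital of France?"}
rendered = utils.apply_template(template, doc)
# rendered == "Q: What is the capital of France?\nA:"
```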
lm-evaluation/build/lib/lm_eval/tasks/arc/README.md ADDED
@@ -0,0 +1,54 @@
+ # ARC
+
+ ### Paper
+
+ Title: Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
+
+ Abstract: https://arxiv.org/abs/1803.05457
+
+ The ARC dataset consists of 7,787 science exam questions drawn from a variety
+ of sources, including science questions provided under license by a research
+ partner affiliated with AI2. These are text-only, English language exam questions
+ that span several grade levels as indicated in the files. Each question has a
+ multiple choice structure (typically 4 answer options). The questions are sorted
+ into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and
+ a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.
+
+ Homepage: https://allenai.org/data/arc
+
+
+ ### Citation
+
+ ```
+ @article{Clark2018ThinkYH,
+   title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
+   author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
+   journal={ArXiv},
+   year={2018},
+   volume={abs/1803.05457}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `ai2_arc`: Evaluates `arc_easy` and `arc_challenge`
+
+ #### Tasks
+
+ * `arc_easy`
+ * `arc_challenge`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/arc/arc_challenge.yaml ADDED
@@ -0,0 +1,3 @@
+ include: arc_easy.yaml
+ task: arc_challenge
+ dataset_name: ARC-Challenge
lm-evaluation/build/lib/lm_eval/tasks/arc/arc_easy.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - ai2_arc
+ task: arc_easy
+ dataset_path: allenai/ai2_arc
+ dataset_name: ARC-Easy
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{question}}\nAnswer:"
+ doc_to_target: "{{choices.label.index(answerKey)}}"
+ doc_to_choice: "{{choices.text}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
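To make the Jinja fields above concrete (a hypothetical record, rendered here with jinja2 directly, which is how the harness applies these templates):

```python
# Hypothetical ARC-style record showing how doc_to_target resolves an index.
from jinja2 import Template

doc = {
    "question": "Which gas do plants absorb from the air?",
    "choices": {"text": ["Oxygen", "Carbon dioxide"], "label": ["A", "B"]},
    "answerKey": "B",
}
prompt = Template("Question: {{question}}\nAnswer:").render(doc)
target = Template("{{choices.label.index(answerKey)}}").render(doc)  # "1"
```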
lm-evaluation/build/lib/lm_eval/tasks/eus_proficiency/README.md ADDED
@@ -0,0 +1,48 @@
+ # EusProficiency
+
+ ### Paper
+
+ Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+ Abstract: https://arxiv.org/abs/2403.20266
+
+ EusProficiency comprises 5,169 exercises on different topics from past EGA exams, the official C1-level certificate of proficiency in Basque. We collected the atarikoa exercises from EGA exams through the years 1998 to 2008. Atarikoa is the first qualifying test of EGA, which measures different aspects of language competency, such as reading comprehension, grammar, vocabulary, spelling, and writing. Each test generally has 85 multiple-choice questions, with 4 choices and a single correct answer.
+
+ Homepage: https://github.com/hitz-zentroa/latxa
+
+
+ ### Citation
+
+ ```
+ @misc{etxaniz2024latxa,
+       title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+       author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+       year={2024},
+       eprint={2403.20266},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ There are no groups.
+
+ #### Tasks
+
+ * `eus_proficiency`: EusProficiency comprises 5,169 exercises on different topics from past EGA exams, the official C1-level certificate of proficiency in Basque.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/eus_proficiency/eus_proficiency.yaml ADDED
@@ -0,0 +1,16 @@
+ dataset_path: HiTZ/EusProficiency
+ dataset_name: default
+ task: eus_proficiency
+ doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ validation_split: null
+ test_split: test
+ fewshot_split: test
+ output_type: multiple_choice
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/build/lib/lm_eval/tasks/eus_reading/README.md ADDED
@@ -0,0 +1,48 @@
+ # EusReading
+
+ ### Paper
+
+ Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+ Abstract: https://arxiv.org/abs/2403.20266
+
+ EusReading consists of 352 reading comprehension exercises (irakurmena) sourced from the set of past EGA exams from 1998 to 2008. Each test generally has 10 multiple-choice questions, with 4 choices and a single correct answer. These exercises are more challenging than Belebele due to the complexity and length of the input texts. As a result, EusReading is useful to measure long context understanding of models.
+
+ Homepage: https://github.com/hitz-zentroa/latxa
+
+
+ ### Citation
+
+ ```
+ @misc{etxaniz2024latxa,
+       title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+       author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+       year={2024},
+       eprint={2403.20266},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ There are no groups.
+
+ #### Tasks
+
+ * `eus_reading`: EusReading consists of 352 reading comprehension exercises (irakurmena) sourced from the set of past EGA exams from 1998 to 2008.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/eus_reading/eus_reading.yaml ADDED
@@ -0,0 +1,16 @@
+ dataset_path: HiTZ/EusReading
+ dataset_name: default
+ task: eus_reading
+ doc_to_text: !function utils.doc_to_text_context
+ doc_to_choice: !function utils.doc_to_choice
+ validation_split: null
+ test_split: test
+ fewshot_split: test
+ output_type: multiple_choice
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/build/lib/lm_eval/tasks/eus_reading/utils.py ADDED
@@ -0,0 +1,41 @@
+ from typing import List
+
+
+ letters = ["A", "B", "C", "D"]
+
+
+ def doc_to_text_context(doc) -> str:
+     """
+     Converts a document to a formatted string.
+
+     Args:
+         doc (dict): A dictionary containing the document information.
+
+     Returns:
+         str: A formatted string containing the question and answer choices.
+     """
+     candidates = doc["candidates"]
+     num_choices = len(candidates)
+     if num_choices < 2:
+         raise ValueError("Invalid number of candidates")
+     choices = letters[:num_choices]
+     formatted_choices = "\n".join(
+         [f"{choice}: {candidates[i]}" for i, choice in enumerate(choices)]
+     )
+     return f"Pasartea: {doc['context']}\n\nGaldera: {doc['question']}\n{formatted_choices}\nErantzuna:"
+
+
+ def doc_to_choice(doc) -> List[str]:
+     """
+     Returns the answer choices for a document.
+
+     Args:
+         doc (dict): A dictionary containing the document information.
+
+     Returns:
+         list: A list of strings containing the answer choices.
+     """
+     num_choices = len(doc["candidates"])
+     if num_choices < 2:
+         raise ValueError("Invalid number of candidates")
+     return letters[:num_choices]
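A worked example with a hypothetical document, showing the exact strings these helpers produce:

```python
# Hypothetical document illustrating doc_to_text_context and doc_to_choice.
doc = {
    "context": "Euskara Euskal Herrian hitz egiten den hizkuntza da.",
    "question": "Non hitz egiten da euskara?",
    "candidates": ["Euskal Herrian", "Katalunian", "Galizian"],
}
print(doc_to_text_context(doc))
# Pasartea: Euskara Euskal Herrian hitz egiten den hizkuntza da.
#
# Galdera: Non hitz egiten da euskara?
# A: Euskal Herrian
# B: Katalunian
# C: Galizian
# Erantzuna:
print(doc_to_choice(doc))  # ['A', 'B', 'C']
```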
lm-evaluation/build/lib/lm_eval/tasks/haerae/README.md ADDED
@@ -0,0 +1,49 @@
+ # HAE-RAE BENCH
+
+ ### Paper
+
+ Title: `HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models`
+
+ Abstract: `Large Language Models (LLMs) trained on massive corpora demonstrate impressive capabilities in a wide range of tasks. While there are ongoing efforts to adapt these models to languages beyond English, the attention given to their evaluation methodologies remains limited. Current multilingual benchmarks often rely on back translations or re-implementations of English tests, limiting their capacity to capture unique cultural and linguistic nuances. To bridge this gap for the Korean language, we introduce HAE-RAE Bench, a dataset curated to challenge models lacking Korean cultural and contextual depth. The dataset encompasses six downstream tasks across four domains: vocabulary, history, general knowledge, and reading comprehension. Contrary to traditional evaluation suites focused on token or sequence classification and specific mathematical or logical reasoning, HAE-RAE Bench emphasizes a model's aptitude for recalling Korean-specific knowledge and cultural contexts. Comparative analysis with prior Korean benchmarks indicates that the HAE-RAE Bench presents a greater challenge to non-native models, by disturbing abilities and knowledge learned from English being transferred.`
+
+ Homepage: https://huggingface.co/datasets/HAERAE-HUB/HAE_RAE_BENCH
+
+ ### Citation
+
+ ```
+ @misc{son2023haerae,
+       title={HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models},
+       author={Guijin Son and Hanwool Lee and Suwan Kim and Huiseo Kim and Jaecheol Lee and Je Won Yeom and Jihyu Jung and Jung Woo Kim and Songseong Kim},
+       year={2023},
+       eprint={2309.02706},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `haerae`: consists of the five tasks provided in the HAE-RAE BENCH paper. 'Reading Comprehension' was excluded from this implementation due to copyright issues and will be included in the next HAE-RAE update. For the other tasks, some of the data may be replaced or extended with the release of HAE-RAE v1.1; please note this when using it.
+
+ #### Tasks
+
+ The following tasks evaluate subjects in the HaeRae dataset
+
+ - `haerae_standard_nomenclature`
+ - `haerae_loan_word`
+ - `haerae_rare_word`
+ - `haerae_general_knowledge`
+ - `haerae_history`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+   * [x] Have you referenced the original paper that introduced the task?
+   * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/haerae/_default_haerae_yaml ADDED
@@ -0,0 +1,17 @@
+ group: haerae
+ dataset_path: HAERAE-HUB/HAE_RAE_BENCH
+ test_split: test
+ fewshot_split: test
+ output_type: multiple_choice
+ doc_to_text: "{{query}}"
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)", "(E)"]
+ doc_to_target: "{{answer}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_gk.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "general_knowledge"
+ "include": "_default_haerae_yaml"
+ "task": "haerae_general_knowledge"
lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_hi.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "history"
+ "include": "_default_haerae_yaml"
+ "task": "haerae_history"
lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_lw.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "loan_words"
+ "include": "_default_haerae_yaml"
+ "task": "haerae_loan_word"
lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_rw.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "rare_words"
+ "include": "_default_haerae_yaml"
+ "task": "haerae_rare_word"
lm-evaluation/build/lib/lm_eval/tasks/haerae/haerae_sn.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "standard_nomenclature"
+ "include": "_default_haerae_yaml"
+ "task": "haerae_standard_nomenclature"
lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/README.md ADDED
@@ -0,0 +1,56 @@
 
1
+ # LAMBADA Cloze
2
+
3
+ ### Paper
4
+
5
+ Title: `The LAMBADA dataset: Word prediction requiring a broad discourse context`
6
+
7
+ Abstract: https://arxiv.org/abs/1606.06031
8
+
9
+ Cloze-style LAMBADA dataset.
10
+ LAMBADA is a dataset to evaluate the capabilities of computational models for text
11
+ understanding by means of a word prediction task. LAMBADA is a collection of narrative
12
+ passages sharing the characteristic that human subjects are able to guess their last
13
+ word if they are exposed to the whole passage, but not if they only see the last
14
+ sentence preceding the target word. To succeed on LAMBADA, computational models
15
+ cannot simply rely on local context, but must be able to keep track of information
16
+ in the broader discourse.
17
+
18
+ Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
19
+
20
+
21
+ ### Citation
22
+
23
+ ```
24
+ @misc{
25
+ author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
26
+ title={The LAMBADA dataset},
27
+ DOI={10.5281/zenodo.2630551},
28
+ publisher={Zenodo},
29
+ year={2016},
30
+ month={Aug}
31
+ }
32
+ ```
33
+
34
+ ### Groups and Tasks
35
+
36
+ #### Groups
37
+
38
+ * `lambada_cloze`
39
+
40
+ #### Tasks
41
+
42
+ * `lambada_openai_cloze_yaml`
43
+ * `lambada_standard_cloze_yaml`
44
+
45
+ ### Checklist
46
+
47
+ For adding novel benchmarks/datasets to the library:
48
+ * [ ] Is the task an existing benchmark in the literature?
49
+ * [ ] Have you referenced the original paper that introduced the task?
50
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
51
+
52
+
53
+ If other tasks on this dataset are already supported:
54
+ * [ ] Is the "Main" variant of this task clearly denoted?
55
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
56
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml ADDED
@@ -0,0 +1,20 @@
+ group:
+   - lambada_cloze
+ task: lambada_openai_cloze_yaml
+ dataset_path: EleutherAI/lambada_openai
+ dataset_name: default
+ output_type: loglikelihood
+ test_split: test
+ doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->"
+ doc_to_target: "{{' '+text.split(' ')[-1]}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{text}}"
+ metric_list:
+   - metric: perplexity
+     aggregation: perplexity
+     higher_is_better: false
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml ADDED
@@ -0,0 +1,21 @@
1
+ group:
2
+ - lambada_cloze
3
+ task: lambada_standard_cloze_yaml
4
+ dataset_path: lambada
5
+ dataset_name: null
6
+ output_type: loglikelihood
7
+ validation_split: validation
8
+ test_split: test
9
+ doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->"
10
+ doc_to_target: "{{' '+text.split(' ')[-1]}}"
11
+ should_decontaminate: true
12
+ doc_to_decontamination_query: "{{text}}"
13
+ metric_list:
14
+ - metric: perplexity
15
+ aggregation: perplexity
16
+ higher_is_better: false
17
+ - metric: acc
18
+ aggregation: mean
19
+ higher_is_better: true
20
+ metadata:
21
+ version: 1.0
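
Both cloze variants are scored identically from per-document `loglikelihood` results: the `perplexity` aggregation is the exponential of the negative mean loglikelihood of the gold word (lower is better), and `acc` is the mean of an is-greedy indicator, i.e. whether greedy decoding would reproduce the gold word exactly (higher is better). A minimal sketch of the two aggregations, with invented numbers:

```python
import math

# Hypothetical per-document results: (loglikelihood of the gold final word,
# whether greedy decoding matched it exactly). Numbers are invented.
results = [(-2.3, True), (-0.7, True), (-5.1, False)]

lls = [ll for ll, _ in results]
greedy = [hit for _, hit in results]

perplexity = math.exp(-sum(lls) / len(lls))  # lower is better
acc = sum(greedy) / len(greedy)              # higher is better

print(f"perplexity={perplexity:.2f}  acc={acc:.2f}")
```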
lm-evaluation/build/lib/lm_eval/tasks/logiqa/README.md ADDED
@@ -0,0 +1,52 @@
+ # LogiQA
+
+ ### Paper
+
+ Title: `LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning`
+
+ Abstract: https://arxiv.org/abs/2007.08124
+
+ LogiQA is a dataset sourced from expert-written questions for testing human logical
+ reasoning. It consists of 8,678 QA instances, covering multiple types of deductive
+ reasoning. Results show that state-of-the-art neural models perform far worse than
+ the human ceiling. The dataset can also serve as a benchmark for re-investigating
+ logical AI in the deep-learning NLP setting.
+
+ Homepage: https://github.com/lgw863/LogiQA-dataset
+
+
+ ### Citation
+
+ ```
+ @misc{liu2020logiqa,
+     title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
+     author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
+     year={2020},
+     eprint={2007.08124},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * Not part of a group yet
+
+ #### Tasks
+
+ * `logiqa`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/logiqa/logiqa.yaml ADDED
@@ -0,0 +1,23 @@
+ task: logiqa
+ dataset_path: EleutherAI/logiqa
+ dataset_name: logiqa
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_choice: "{{options}}"
+ doc_to_text: !function utils_logiqa.doc_to_text
+ doc_to_target: !function utils_logiqa.doc_to_target
+ doc_to_decontamination_query: "{{context}}"
+ should_decontaminate: true
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
+ dataset_kwargs:
+   trust_remote_code: true
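
Because `output_type` is `multiple_choice`, the harness scores every option string by loglikelihood: `acc` takes the raw argmax, while `acc_norm` first normalizes each option's loglikelihood by its length, so long options are not penalized merely for containing more tokens. A minimal sketch with invented scores, normalizing by character length here:

```python
import numpy as np

# Invented option strings and loglikelihood scores for one document.
choices = ["it rained", "the committee postponed the vote indefinitely",
           "no", "maybe"]
lls = np.array([-12.0, -18.0, -9.5, -10.0])
gold = 1  # index of the correct option

acc = float(np.argmax(lls) == gold)                 # 0.0: raw argmax picks "no"
lengths = np.array([float(len(c)) for c in choices])
acc_norm = float(np.argmax(lls / lengths) == gold)  # 1.0: normalization recovers the gold option
```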
lm-evaluation/build/lib/lm_eval/tasks/logiqa/utils_logiqa.py ADDED
@@ -0,0 +1,24 @@
+ # Copied from the lm-evaluation-harness master branch.
+ def doc_to_text(doc) -> str:
+     """
+     Passage: <passage>
+     Question: <question>
+     Choices:
+     A. <choice1>
+     B. <choice2>
+     C. <choice3>
+     D. <choice4>
+     Answer:
+     """
+     choices = ["a", "b", "c", "d"]
+     prompt = "Passage: " + doc["context"] + "\n"
+     prompt += "Question: " + doc["question"] + "\nChoices:\n"
+     for choice, option in zip(choices, doc["options"]):
+         prompt += f"{choice.upper()}. {option}\n"
+     prompt += "Answer:"
+     return prompt
+
+
+ def doc_to_target(doc) -> int:
+     choices = ["a", "b", "c", "d"]
+     return choices.index(doc["label"].strip())  # map the gold letter ("a"-"d") to its option index
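
For reference, here is what these helpers produce on a hypothetical LogiQA-style record (the record itself is invented for illustration):

```python
doc = {
    "context": "All roses are flowers. Some flowers fade quickly.",
    "question": "Which conclusion follows?",
    "options": ["All roses fade quickly", "Some flowers are roses",
                "No rose is a flower", "Roses never fade"],
    "label": "b",
}

print(doc_to_text(doc))
# Passage: All roses are flowers. Some flowers fade quickly.
# Question: Which conclusion follows?
# Choices:
# A. All roses fade quickly
# B. Some flowers are roses
# C. No rose is a flower
# D. Roses never fade
# Answer:

print(doc_to_target(doc))  # 1  (the index scored against doc_to_choice)
```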