applied-ai-018 committed on
Commit 8397dcc · verified · 1 Parent(s): 8a669e8

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/__init__.py +4 -0
  2. lm-evaluation/build/lib/lm_eval/__main__.py +417 -0
  3. lm-evaluation/build/lib/lm_eval/caching/cache.py +55 -0
  4. lm-evaluation/build/lib/lm_eval/evaluator.py +583 -0
  5. lm-evaluation/build/lib/lm_eval/evaluator_utils.py +312 -0
  6. lm-evaluation/build/lib/lm_eval/logging_utils.py +455 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/__init__.py +446 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/anli/README.md +56 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r1.yaml +26 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r2.yaml +5 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r3.yaml +5 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/README.md +60 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml +18 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2da.yaml +5 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml +5 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml +5 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3da.yaml +5 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml +5 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4da.yaml +5 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml +5 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5da.yaml +5 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml +5 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/bbh/README.md +49 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/bbh/_generate_configs.py +80 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml +18 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml +18 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml +20 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml +18 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml +20 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml +20 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml +19 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml +19 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml +19 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml +19 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml +18 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml +17 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml +19 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml +19 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml +19 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml +21 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml +19 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml +19 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml +19 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml +19 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml +20 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml +15 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/coqa/README.md +43 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/coqa/default.yaml +24 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/coqa/utils.py +77 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/eq_bench/README.md +55 -0
lm-evaluation/build/lib/lm_eval/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .evaluator import evaluate, simple_evaluate
2
+
3
+ import habana_frameworks.torch.gpu_migration
4
+ import habana_frameworks.torch.core as htcore
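
Note that this `__init__.py` imports `habana_frameworks.torch.gpu_migration` and `habana_frameworks.torch.core` at module import time, so `import lm_eval` from this build only works where the Habana (Gaudi) packages are installed. A minimal, hedged smoke test (the guard below is illustrative and not part of the diff):

    # Importing lm_eval from this build pulls in the Habana modules as a side effect.
    try:
        import lm_eval  # also imports habana_frameworks.torch.* per the __init__.py above
        print(lm_eval.simple_evaluate is not None)  # re-exported from .evaluator
    except ImportError as err:
        # Expected on machines without the Habana SDK installed.
        print(f"lm_eval import failed (likely missing habana_frameworks): {err}")
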
lm-evaluation/build/lib/lm_eval/__main__.py ADDED
@@ -0,0 +1,417 @@
1
+ import argparse
2
+ import json
3
+ import logging
4
+ import os
5
+ import re
6
+ import sys
7
+ from functools import partial
8
+ from pathlib import Path
9
+ from typing import Union
10
+
11
+ import numpy as np
12
+
13
+ from lm_eval import evaluator, utils
14
+ from lm_eval.evaluator import request_caching_arg_to_dict
15
+ from lm_eval.logging_utils import WandbLogger
16
+ from lm_eval.tasks import TaskManager
17
+ from lm_eval.utils import make_table, simple_parse_args_string
18
+
19
+
20
+ DEFAULT_RESULTS_FILE = "results.json"
21
+
22
+
23
+ def _handle_non_serializable(o):
24
+ if isinstance(o, np.int64) or isinstance(o, np.int32):
25
+ return int(o)
26
+ elif isinstance(o, set):
27
+ return list(o)
28
+ else:
29
+ return str(o)
30
+
31
+
32
+ def _int_or_none_list_arg_type(max_len: int, value: str, split_char: str = ","):
33
+ def parse_value(item):
34
+ item = item.strip().lower()
35
+ if item == "none":
36
+ return None
37
+ try:
38
+ return int(item)
39
+ except ValueError:
40
+ raise argparse.ArgumentTypeError(f"{item} is not an integer or None")
41
+
42
+ items = [parse_value(v) for v in value.split(split_char)]
43
+ num_items = len(items)
44
+
45
+ if num_items == 1:
46
+ # Makes downstream handling the same for single and multiple values
47
+ items = items * max_len
48
+ elif num_items != max_len:
49
+ raise argparse.ArgumentTypeError(
50
+ f"Argument requires {max_len} integers or None, separated by '{split_char}'"
51
+ )
52
+
53
+ return items
54
+
55
+
56
+ def check_argument_types(parser: argparse.ArgumentParser):
57
+ """
58
+ Check to make sure all CLI args are typed, raises error if not
59
+ """
60
+ for action in parser._actions:
61
+ if action.dest != "help" and not action.const:
62
+ if action.type is None:
63
+ raise ValueError(
64
+ f"Argument '{action.dest}' doesn't have a type specified."
65
+ )
66
+ else:
67
+ continue
68
+
69
+
70
+ def setup_parser() -> argparse.ArgumentParser:
71
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
72
+ parser.add_argument(
73
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
74
+ )
75
+ parser.add_argument(
76
+ "--tasks",
77
+ "-t",
78
+ default=None,
79
+ type=str,
80
+ metavar="task1,task2",
81
+ help="To get full list of tasks, use the command lm-eval --tasks list",
82
+ )
83
+ parser.add_argument(
84
+ "--model_args",
85
+ "-a",
86
+ default="",
87
+ type=str,
88
+ help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
89
+ )
90
+ parser.add_argument(
91
+ "--num_fewshot",
92
+ "-f",
93
+ type=int,
94
+ default=None,
95
+ metavar="N",
96
+ help="Number of examples in few-shot context",
97
+ )
98
+ parser.add_argument(
99
+ "--batch_size",
100
+ "-b",
101
+ type=str,
102
+ default=1,
103
+ metavar="auto|auto:N|N",
104
+ help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
105
+ )
106
+ parser.add_argument(
107
+ "--max_batch_size",
108
+ type=int,
109
+ default=None,
110
+ metavar="N",
111
+ help="Maximal batch size to try with --batch_size auto.",
112
+ )
113
+ parser.add_argument(
114
+ "--device",
115
+ type=str,
116
+ default=None,
117
+ help="Device to use (e.g. cuda, cuda:0, cpu).",
118
+ )
119
+ parser.add_argument(
120
+ "--output_path",
121
+ "-o",
122
+ default=None,
123
+ type=str,
124
+ metavar="DIR|DIR/file.json",
125
+ help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
126
+ )
127
+ parser.add_argument(
128
+ "--limit",
129
+ "-L",
130
+ type=float,
131
+ default=None,
132
+ metavar="N|0<N<1",
133
+ help="Limit the number of examples per task. "
134
+ "If <1, limit is a percentage of the total number of examples.",
135
+ )
136
+ parser.add_argument(
137
+ "--use_cache",
138
+ "-c",
139
+ type=str,
140
+ default=None,
141
+ metavar="DIR",
142
+ help="A path to a sqlite db file for caching model responses. `None` if not caching.",
143
+ )
144
+ parser.add_argument(
145
+ "--cache_requests",
146
+ type=str,
147
+ default=None,
148
+ choices=["true", "refresh", "delete"],
149
+ help="Speed up evaluation by caching the building of dataset requests. `None` if not caching.",
150
+ )
151
+ parser.add_argument(
152
+ "--check_integrity",
153
+ action="store_true",
154
+ help="Whether to run the relevant part of the test suite for the tasks.",
155
+ )
156
+ parser.add_argument(
157
+ "--write_out",
158
+ "-w",
159
+ action="store_true",
160
+ default=False,
161
+ help="Prints the prompt for the first few documents.",
162
+ )
163
+ parser.add_argument(
164
+ "--log_samples",
165
+ "-s",
166
+ action="store_true",
167
+ default=False,
168
+ help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.",
169
+ )
170
+ parser.add_argument(
171
+ "--show_config",
172
+ action="store_true",
173
+ default=False,
174
+ help="If True, shows the full config of all tasks at the end of the evaluation.",
175
+ )
176
+ parser.add_argument(
177
+ "--include_path",
178
+ type=str,
179
+ default=None,
180
+ metavar="DIR",
181
+ help="Additional path to include if there are external tasks to include.",
182
+ )
183
+ parser.add_argument(
184
+ "--gen_kwargs",
185
+ type=str,
186
+ default=None,
187
+ help=(
188
+ "String arguments for model generation on greedy_until tasks,"
189
+ " e.g. `temperature=0,top_k=0,top_p=0`."
190
+ ),
191
+ )
192
+ parser.add_argument(
193
+ "--verbosity",
194
+ "-v",
195
+ type=str.upper,
196
+ default="INFO",
197
+ metavar="CRITICAL|ERROR|WARNING|INFO|DEBUG",
198
+ help="Controls the reported logging error level. Set to DEBUG when testing + adding new task configurations for comprehensive log output.",
199
+ )
200
+ parser.add_argument(
201
+ "--wandb_args",
202
+ type=str,
203
+ default="",
204
+ help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval`",
205
+ )
206
+ parser.add_argument(
207
+ "--predict_only",
208
+ "-x",
209
+ action="store_true",
210
+ default=False,
211
+ help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
212
+ )
213
+ parser.add_argument(
214
+ "--seed",
215
+ type=partial(_int_or_none_list_arg_type, 3),
216
+ default="0,1234,1234", # for backward compatibility
217
+ help=(
218
+ "Set seed for python's random, numpy and torch.\n"
219
+ "Accepts a comma-separated list of 3 values for python's random, numpy, and torch seeds, respectively, "
220
+ "or a single integer to set the same seed for all three.\n"
221
+ "The values are either an integer or 'None' to not set the seed. Default is `0,1234,1234` (for backward compatibility).\n"
222
+ "E.g. `--seed 0,None,8` sets `random.seed(0)` and `torch.manual_seed(8)`. Here numpy's seed is not set since the second value is `None`.\n"
223
+ "E.g, `--seed 42` sets all three seeds to 42."
224
+ ),
225
+ )
226
+ parser.add_argument(
227
+ "--trust_remote_code",
228
+ action="store_true",
229
+ help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
230
+ )
231
+
232
+ return parser
233
+
234
+
235
+ def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
236
+ check_argument_types(parser)
237
+ return parser.parse_args()
238
+
239
+
240
+ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
241
+ if not args:
242
+ # we allow for args to be passed externally, else we parse them ourselves
243
+ parser = setup_parser()
244
+ args = parse_eval_args(parser)
245
+
246
+ if args.wandb_args:
247
+ wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))
248
+
249
+ eval_logger = utils.eval_logger
250
+ eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
251
+ eval_logger.info(f"Verbosity set to {args.verbosity}")
252
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
253
+
254
+ if args.predict_only:
255
+ args.log_samples = True
256
+ if (args.log_samples or args.predict_only) and not args.output_path:
257
+ raise ValueError(
258
+ "Specify --output_path if providing --log_samples or --predict_only"
259
+ )
260
+
261
+ if args.include_path is not None:
262
+ eval_logger.info(f"Including path: {args.include_path}")
263
+ task_manager = TaskManager(args.verbosity, include_path=args.include_path)
264
+
265
+ if args.limit:
266
+ eval_logger.warning(
267
+ " --limit SHOULD ONLY BE USED FOR TESTING."
268
+ " REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
269
+ )
270
+
271
+ if args.tasks is None:
272
+ eval_logger.error("Need to specify task to evaluate.")
273
+ sys.exit()
274
+ elif args.tasks == "list":
275
+ eval_logger.info(
276
+ "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks))
277
+ )
278
+ sys.exit()
279
+ else:
280
+ if os.path.isdir(args.tasks):
281
+ import glob
282
+
283
+ task_names = []
284
+ yaml_path = os.path.join(args.tasks, "*.yaml")
285
+ for yaml_file in glob.glob(yaml_path):
286
+ config = utils.load_yaml_config(yaml_file)
287
+ task_names.append(config)
288
+ else:
289
+ task_list = args.tasks.split(",")
290
+ task_names = task_manager.match_tasks(task_list)
291
+ for task in [task for task in task_list if task not in task_names]:
292
+ if os.path.isfile(task):
293
+ config = utils.load_yaml_config(task)
294
+ task_names.append(config)
295
+ task_missing = [
296
+ task for task in task_list if task not in task_names and "*" not in task
297
+ ] # we don't want errors if a wildcard ("*") task name was used
298
+
299
+ if task_missing:
300
+ missing = ", ".join(task_missing)
301
+ eval_logger.error(
302
+ f"Tasks were not found: {missing}\n"
303
+ f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
304
+ )
305
+ raise ValueError(
306
+ f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues."
307
+ )
308
+
309
+ if args.output_path:
310
+ path = Path(args.output_path)
311
+ # check if file or 'dir/results.json' exists
312
+ if path.is_file():
313
+ raise FileExistsError(f"File already exists at {path}")
314
+ output_path_file = path.joinpath(DEFAULT_RESULTS_FILE)
315
+ if output_path_file.is_file():
316
+ eval_logger.warning(
317
+ f"File {output_path_file} already exists. Results will be overwritten."
318
+ )
319
+ # if path json then get parent dir
320
+ elif path.suffix in (".json", ".jsonl"):
321
+ output_path_file = path
322
+ path.parent.mkdir(parents=True, exist_ok=True)
323
+ path = path.parent
324
+ else:
325
+ path.mkdir(parents=True, exist_ok=True)
326
+
327
+ # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
328
+ if args.trust_remote_code:
329
+ os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = str(args.trust_remote_code)
330
+ args.model_args = (
331
+ args.model_args
332
+ + f",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}"
333
+ )
334
+
335
+ eval_logger.info(f"Selected Tasks: {task_names}")
336
+
337
+ request_caching_args = request_caching_arg_to_dict(
338
+ cache_requests=args.cache_requests
339
+ )
340
+
341
+ results = evaluator.simple_evaluate(
342
+ model=args.model,
343
+ model_args=args.model_args,
344
+ tasks=task_names,
345
+ num_fewshot=args.num_fewshot,
346
+ batch_size=args.batch_size,
347
+ max_batch_size=args.max_batch_size,
348
+ device=args.device,
349
+ use_cache=args.use_cache,
350
+ limit=args.limit,
351
+ check_integrity=args.check_integrity,
352
+ write_out=args.write_out,
353
+ log_samples=args.log_samples,
354
+ gen_kwargs=args.gen_kwargs,
355
+ task_manager=task_manager,
356
+ verbosity=args.verbosity,
357
+ predict_only=args.predict_only,
358
+ random_seed=args.seed[0],
359
+ numpy_random_seed=args.seed[1],
360
+ torch_random_seed=args.seed[2],
361
+ **request_caching_args,
362
+ )
363
+
364
+ if results is not None:
365
+ if args.log_samples:
366
+ samples = results.pop("samples")
367
+ dumped = json.dumps(
368
+ results, indent=2, default=_handle_non_serializable, ensure_ascii=False
369
+ )
370
+ if args.show_config:
371
+ print(dumped)
372
+
373
+ batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))
374
+
375
+ # Add W&B logging
376
+ if args.wandb_args:
377
+ try:
378
+ wandb_logger.post_init(results)
379
+ wandb_logger.log_eval_result()
380
+ if args.log_samples:
381
+ wandb_logger.log_eval_samples(samples)
382
+ except Exception as e:
383
+ eval_logger.info(f"Logging to Weights and Biases failed due to {e}")
384
+
385
+ if args.output_path:
386
+ output_path_file.open("w", encoding="utf-8").write(dumped)
387
+
388
+ if args.log_samples:
389
+ for task_name, config in results["configs"].items():
390
+ output_name = "{}_{}".format(
391
+ re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", args.model_args),
392
+ task_name,
393
+ )
394
+ filename = path.joinpath(f"{output_name}.jsonl")
395
+ samples_dumped = json.dumps(
396
+ samples[task_name],
397
+ indent=2,
398
+ default=_handle_non_serializable,
399
+ ensure_ascii=False,
400
+ )
401
+ filename.write_text(samples_dumped, encoding="utf-8")
402
+
403
+ print(
404
+ f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
405
+ f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
406
+ )
407
+ print(make_table(results))
408
+ if "groups" in results:
409
+ print(make_table(results, "groups"))
410
+
411
+ if args.wandb_args:
412
+ # Tear down wandb run once all the logging is done.
413
+ wandb_logger.run.finish()
414
+
415
+
416
+ if __name__ == "__main__":
417
+ cli_evaluate()
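
For orientation, a minimal sketch of driving this entry point programmatically, using only the arguments defined above (the model name, task, and limit are placeholders for a smoke test, not values taken from this commit):

    # Hedged usage sketch: build args with setup_parser() and pass them to cli_evaluate().
    from lm_eval.__main__ import setup_parser, cli_evaluate

    parser = setup_parser()
    args = parser.parse_args([
        "--model", "hf",
        "--model_args", "pretrained=EleutherAI/pythia-160m",  # placeholder model
        "--tasks", "anli_r1",                                  # placeholder task
        "--batch_size", "1",
        "--seed", "0,1234,1234",                               # random, numpy, torch seeds
        "--limit", "10",                                       # smoke-test only
    ])
    cli_evaluate(args)
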
lm-evaluation/build/lib/lm_eval/caching/cache.py ADDED
@@ -0,0 +1,55 @@
1
+ import hashlib
2
+ import os
3
+
4
+ import dill
5
+
6
+ from lm_eval.utils import eval_logger
7
+
8
+
9
+ MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
10
+
11
+ OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH")
12
+
13
+
14
+ PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache"
15
+
16
+ # This should be sufficient for uniqueness
17
+ HASH_INPUT = "EleutherAI-lm-evaluation-harness"
18
+
19
+ HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest()
20
+
21
+ FILE_SUFFIX = f".{HASH_PREFIX}.pickle"
22
+
23
+
24
+ def load_from_cache(file_name):
25
+ try:
26
+ path = f"{PATH}/{file_name}{FILE_SUFFIX}"
27
+
28
+ with open(path, "rb") as file:
29
+ cached_task_dict = dill.loads(file.read())
30
+ return cached_task_dict
31
+
32
+ except Exception:
33
+ eval_logger.debug(f"{file_name} is not cached, generating...")
34
+ pass
35
+
36
+
37
+ def save_to_cache(file_name, obj):
38
+ if not os.path.exists(PATH):
39
+ os.mkdir(PATH)
40
+
41
+ file_path = f"{PATH}/{file_name}{FILE_SUFFIX}"
42
+
43
+ eval_logger.debug(f"Saving {file_path} to cache...")
44
+ with open(file_path, "wb") as file:
45
+ file.write(dill.dumps(obj))
46
+
47
+
48
+ # NOTE the "key" param is to allow for flexibility
49
+ def delete_cache(key: str = ""):
50
+ files = os.listdir(PATH)
51
+
52
+ for file in files:
53
+ if file.startswith(key) and file.endswith(FILE_SUFFIX):
54
+ file_path = f"{PATH}/{file}"
55
+ os.unlink(file_path)
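
A short sketch of how these helpers round-trip an object through the pickle cache ("demo_tasks" is a hypothetical key used only for illustration):

    # save_to_cache / load_from_cache / delete_cache as defined above.
    from lm_eval.caching.cache import save_to_cache, load_from_cache, delete_cache

    save_to_cache("demo_tasks", {"anli_r1": {"num_fewshot": 0}})
    cached = load_from_cache("demo_tasks")   # dict on a hit; None (after a debug log) on a miss
    print(cached)
    delete_cache("demo_tasks")               # unlinks matching *.pickle files under PATH
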
lm-evaluation/build/lib/lm_eval/evaluator.py ADDED
@@ -0,0 +1,583 @@
1
+ import itertools
2
+ import logging
3
+ import random
4
+ import time
5
+ from collections import defaultdict
6
+ from typing import TYPE_CHECKING, List, Optional, Union
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ import lm_eval.api.metrics
12
+ import lm_eval.api.registry
13
+ import lm_eval.models
14
+ from lm_eval.caching.cache import delete_cache
15
+ from lm_eval.evaluator_utils import (
16
+ consolidate_results,
17
+ get_sample_size,
18
+ get_task_list,
19
+ prepare_print_tasks,
20
+ print_writeout,
21
+ run_task_tests,
22
+ )
23
+ from lm_eval.logging_utils import add_env_info, get_git_commit_hash
24
+ from lm_eval.tasks import TaskManager, get_task_dict
25
+ from lm_eval.utils import eval_logger, positional_deprecated, simple_parse_args_string
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from lm_eval.api.model import LM
30
+ from lm_eval.tasks import Task
31
+
32
+
33
+ @positional_deprecated
34
+ def simple_evaluate(
35
+ model,
36
+ model_args: Optional[Union[str, dict]] = None,
37
+ tasks: Optional[List[Union[str, dict, object]]] = None,
38
+ num_fewshot: Optional[int] = None,
39
+ batch_size: Optional[int] = None,
40
+ max_batch_size: Optional[int] = None,
41
+ device: Optional[str] = None,
42
+ use_cache: Optional[str] = None,
43
+ cache_requests: bool = False,
44
+ rewrite_requests_cache: bool = False,
45
+ delete_requests_cache: bool = False,
46
+ limit: Optional[Union[int, float]] = None,
47
+ bootstrap_iters: int = 100000,
48
+ check_integrity: bool = False,
49
+ write_out: bool = False,
50
+ log_samples: bool = True,
51
+ gen_kwargs: Optional[str] = None,
52
+ task_manager: Optional[TaskManager] = None,
53
+ verbosity: str = "INFO",
54
+ predict_only: bool = False,
55
+ random_seed: int = 0,
56
+ numpy_random_seed: int = 1234,
57
+ torch_random_seed: int = 1234,
58
+ ):
59
+ """Instantiate and evaluate a model on a list of tasks.
60
+
61
+ :param model: Union[str, LM]
62
+ Name of model or LM object, see lm_eval.models.get_model
63
+ :param model_args: Optional[str, dict]
64
+ String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object.
65
+ Ignored if `model` argument is a LM object.
66
+ :param tasks: list[Union[str, dict, Task]]
67
+ List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
68
+ :param num_fewshot: int
69
+ Number of examples in few-shot context
70
+ :param batch_size: int or str, optional
71
+ Batch size for model
72
+ :param max_batch_size: int, optional
73
+ Maximal batch size to try with automatic batch size detection
74
+ :param device: str, optional
75
+ PyTorch device (e.g. "cpu" or "cuda:0") for running models
76
+ :param use_cache: str, optional
77
+ A path to a sqlite db file for caching model responses. `None` if not caching.
78
+ :param cache_requests: bool, optional
79
+ Speed up evaluation by caching the building of dataset requests. `None` if not caching.
80
+ :param rewrite_requests_cache: bool, optional
81
+ Rewrites all of the request cache if set to `True`. `None` if not desired.
82
+ :param delete_requests_cache: bool, optional
83
+ Deletes all of the request cache if set to `True`. `None` if not desired.
84
+ :param limit: int or float, optional
85
+ Limit the number of examples per task (only use this for testing), If <1, limit is a percentage of the total number of examples.
86
+ :param bootstrap_iters:
87
+ Number of iterations for bootstrap statistics
88
+ :param check_integrity: bool
89
+ Whether to run the relevant part of the test suite for the tasks
90
+ :param write_out: bool
91
+ If True, write out an example document and model input for checking task integrity
92
+ :param log_samples: bool
93
+ If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
94
+ :param gen_kwargs: str
95
+ String arguments for model generation
96
+ Ignored for all tasks with loglikelihood output_type
97
+ :param predict_only: bool
98
+ If true only model outputs will be generated and returned. Metrics will not be evaluated
99
+ :param random_seed: int
100
+ Random seed for python's random module. If set to None, the seed will not be set.
101
+ :param numpy_random_seed: int
102
+ Random seed for numpy. If set to None, the seed will not be set.
103
+ :param torch_random_seed: int
104
+ Random seed for torch. If set to None, the seed will not be set.
105
+
106
+ :return
107
+ Dictionary of results
108
+ """
109
+ eval_logger.setLevel(getattr(logging, f"{verbosity}"))
110
+ start_date = time.time()
111
+
112
+ if delete_requests_cache:
113
+ eval_logger.info("Deleting requests cache...")
114
+ delete_cache()
115
+
116
+ seed_message = []
117
+ if random_seed is not None:
118
+ # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412
119
+ seed_message.append(f"Setting random seed to {random_seed}")
120
+ random.seed(random_seed)
121
+
122
+ if numpy_random_seed is not None:
123
+ seed_message.append(f"Setting numpy seed to {numpy_random_seed}")
124
+ np.random.seed(numpy_random_seed)
125
+
126
+ if torch_random_seed is not None:
127
+ seed_message.append(f"Setting torch manual seed to {torch_random_seed}")
128
+ torch.manual_seed(torch_random_seed)
129
+
130
+ if seed_message:
131
+ eval_logger.info(" | ".join(seed_message))
132
+
133
+ if tasks is None:
134
+ tasks = []
135
+ if len(tasks) == 0:
136
+ raise ValueError(
137
+ "No tasks specified, or no tasks found. Please verify the task names."
138
+ )
139
+
140
+ if gen_kwargs is not None:
141
+ gen_kwargs = simple_parse_args_string(gen_kwargs)
142
+ eval_logger.warning(
143
+ "generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. "
144
+ "Ensure 'do_sample=True' for non-greedy decoding!"
145
+ )
146
+ if gen_kwargs == "":
147
+ gen_kwargs = None
148
+
149
+ if isinstance(model, str):
150
+ if model_args is None:
151
+ eval_logger.warning("model_args not specified. Using defaults.")
152
+ model_args = ""
153
+ if "pretrained" not in model_args and model in [
154
+ "hf-auto",
155
+ "hf",
156
+ "huggingface",
157
+ "vllm",
158
+ ]:
159
+ eval_logger.warning(
160
+ "pretrained not specified. Using default pretrained=gpt2."
161
+ )
162
+
163
+ if isinstance(model_args, dict):
164
+ eval_logger.info(
165
+ f"Initializing {model} model, with arguments: {model_args}"
166
+ )
167
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
168
+ model_args,
169
+ {
170
+ "batch_size": batch_size,
171
+ "max_batch_size": max_batch_size,
172
+ "device": device,
173
+ },
174
+ )
175
+
176
+ else:
177
+ eval_logger.info(
178
+ f"Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}"
179
+ )
180
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
181
+ model_args,
182
+ {
183
+ "batch_size": batch_size,
184
+ "max_batch_size": max_batch_size,
185
+ "device": device,
186
+ },
187
+ )
188
+ else:
189
+ if not isinstance(model, lm_eval.api.model.LM):
190
+ raise TypeError
191
+ eval_logger.info("Using pre-initialized model")
192
+ lm = model
193
+
194
+ if use_cache is not None:
195
+ eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}")
196
+ lm = lm_eval.api.model.CachingLM(
197
+ lm,
198
+ use_cache
199
+ # each rank receives a different cache db.
200
+ # necessary to avoid multiple writes to cache at once
201
+ + "_rank"
202
+ + str(lm.rank)
203
+ + ".db",
204
+ )
205
+
206
+ if task_manager is None:
207
+ task_manager = TaskManager(verbosity)
208
+
209
+ task_dict = get_task_dict(tasks, task_manager)
210
+ for task_name in task_dict.keys():
211
+ task_obj = task_dict[task_name]
212
+ if isinstance(task_obj, tuple):
213
+ _, task_obj = task_obj
214
+ if task_obj is None:
215
+ continue
216
+
217
+ if task_obj.get_config("output_type") == "generate_until":
218
+ if gen_kwargs is not None:
219
+ task_obj.set_config(
220
+ key="generation_kwargs", value=gen_kwargs, update=True
221
+ )
222
+
223
+ if predict_only:
224
+ log_samples = True
225
+ eval_logger.info(
226
+ f"Processing {task_name} in output-only mode. Metrics will not be calculated!"
227
+ )
228
+ # we have to change the class properties post-hoc. This is pretty hacky.
229
+ task_obj.override_metric(metric_name="bypass")
230
+
231
+ # override tasks' fewshot values to the provided num_fewshot arg value
232
+ # except if tasks have it set to 0 manually in their configs--then we should never overwrite that
233
+ if num_fewshot is not None:
234
+ if (default_num_fewshot := task_obj.get_config("num_fewshot")) == 0:
235
+ eval_logger.info(
236
+ f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored."
237
+ )
238
+ else:
239
+ eval_logger.warning(
240
+ f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}"
241
+ )
242
+ task_obj.set_config(key="num_fewshot", value=num_fewshot)
243
+ else:
244
+ # if num_fewshot not provided, and the task does not define a default one, default to 0
245
+ if (default_num_fewshot := task_obj.get_config("num_fewshot")) is None:
246
+ task_obj.set_config(key="num_fewshot", value=0)
247
+
248
+ if check_integrity:
249
+ run_task_tests(task_list=tasks)
250
+
251
+ results = evaluate(
252
+ lm=lm,
253
+ task_dict=task_dict,
254
+ limit=limit,
255
+ cache_requests=cache_requests,
256
+ rewrite_requests_cache=rewrite_requests_cache,
257
+ bootstrap_iters=bootstrap_iters,
258
+ write_out=write_out,
259
+ log_samples=log_samples,
260
+ verbosity=verbosity,
261
+ )
262
+
263
+ if lm.rank == 0:
264
+ if isinstance(model, str):
265
+ model_name = model
266
+ elif hasattr(model, "config") and hasattr(model.config, "_name_or_path"):
267
+ model_name = model.config._name_or_path
268
+ else:
269
+ model_name = type(model).__name__
270
+
271
+ # add info about the model and few shot config
272
+ results["config"] = {
273
+ "model": model_name,
274
+ "model_args": model_args,
275
+ "batch_size": batch_size,
276
+ "batch_sizes": (
277
+ list(lm.batch_sizes.values()) if hasattr(lm, "batch_sizes") else []
278
+ ),
279
+ "device": device,
280
+ "use_cache": use_cache,
281
+ "limit": limit,
282
+ "bootstrap_iters": bootstrap_iters,
283
+ "gen_kwargs": gen_kwargs,
284
+ }
285
+ results["git_hash"] = get_git_commit_hash()
286
+ results["date"] = start_date
287
+ add_env_info(results) # additional environment info to results
288
+ return results
289
+ else:
290
+ return None
291
+
292
+
293
+ @positional_deprecated
294
+ def evaluate(
295
+ lm: "LM",
296
+ task_dict,
297
+ limit: Optional[int] = None,
298
+ cache_requests: bool = False,
299
+ rewrite_requests_cache: bool = False,
300
+ bootstrap_iters: Optional[int] = 100000,
301
+ write_out: bool = False,
302
+ log_samples: bool = True,
303
+ verbosity: str = "INFO",
304
+ ):
305
+ """Instantiate and evaluate a model on a list of tasks.
306
+
307
+ :param lm: obj
308
+ Language Model
309
+ :param task_dict: dict[str, Task]
310
+ Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
311
+ :param limit: int, optional
312
+ Limit the number of examples per task (only use this for testing)
313
+ :param bootstrap_iters:
314
+ Number of iterations for bootstrap statistics
315
+ :param write_out: bool
316
+ If True, write out an example document and model input for checking task integrity
317
+ :param log_samples: bool
318
+ If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
319
+ :return
320
+ Dictionary of results
321
+ """
322
+
323
+ eval_logger.setLevel(getattr(logging, f"{verbosity}"))
324
+
325
+ # tracks all Instances/requests a model must generate output on.
326
+ requests = defaultdict(list)
327
+ # stores the amount to pad out reqs per req. type so that
328
+ # number of fwd passes per distributed rank is equal
329
+ padding_requests = defaultdict(int)
330
+
331
+ # get lists of group hierarchy and each type of request
332
+ task_hierarchy, eval_tasks = get_task_list(task_dict)
333
+ if not log_samples:
334
+ if not all(
335
+ "bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys()
336
+ for task_output in eval_tasks
337
+ ):
338
+ raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
339
+ for task_output in eval_tasks:
340
+ task: Task = task_output.task
341
+ limit = get_sample_size(task, limit)
342
+ task.build_all_requests(
343
+ limit=limit,
344
+ rank=lm.rank,
345
+ world_size=lm.world_size,
346
+ cache_requests=cache_requests,
347
+ rewrite_requests_cache=rewrite_requests_cache,
348
+ )
349
+ eval_logger.debug(
350
+ f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
351
+ )
352
+
353
+ if write_out:
354
+ print_writeout(task)
355
+ # aggregate Instances by LM method requested to get output.
356
+ for instance in task.instances:
357
+ reqtype = instance.request_type
358
+ requests[reqtype].append(instance)
359
+
360
+ if lm.world_size > 1:
361
+ instances_rnk = torch.tensor(len(task._instances), device=lm.device)
362
+ gathered_item = (
363
+ lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
364
+ )
365
+ # "multiple_choice" task types dispatch (several) "loglikelihood" request types
366
+ reqtype = (
367
+ "loglikelihood"
368
+ if task.OUTPUT_TYPE == "multiple_choice"
369
+ else task.OUTPUT_TYPE
370
+ )
371
+ # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
372
+ numpad = max(gathered_item) - gathered_item[lm.rank]
373
+ # todo: may not account for padding in cases like SquadV2 which has multiple req types
374
+ padding_requests[reqtype] += numpad
375
+
376
+ ### Run LM on inputs, get all outputs ###
377
+ # execute each type of request
378
+ for reqtype, reqs in requests.items():
379
+ eval_logger.info(f"Running {reqtype} requests")
380
+ # create `K` copies of each request `req` based off `K = req.repeats`
381
+ cloned_reqs = []
382
+ for req in reqs:
383
+ cloned_reqs.extend([req] * req.repeats)
384
+
385
+ if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
386
+ for _ in range(padding_requests[reqtype]):
387
+ cloned_reqs.extend([req] * req.repeats)
388
+
389
+ # run requests through model
390
+ resps = getattr(lm, reqtype)(cloned_reqs)
391
+
392
+ # put responses from model into a list of length K for each request.
393
+ for x, req in zip(resps, cloned_reqs):
394
+ req.resps.append(x)
395
+
396
+ if lm.world_size > 1:
397
+ lm.accelerator.wait_for_everyone()
398
+
399
+ RANK = lm.rank
400
+ WORLD_SIZE = lm.world_size
401
+ ### Postprocess outputs ###
402
+ # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
403
+ for task_output in eval_tasks:
404
+ task = task_output.task
405
+ task.apply_filters()
406
+
407
+ ### Collect values of metrics on all datapoints ###
408
+ # # unpack results and sort back in order and return control to Task
409
+ # TODO: make it possible to use a different metric per filter
410
+ # Pre-process task.instances to group by doc_id
411
+ instances_by_doc_id = defaultdict(list)
412
+ for instance in task.instances:
413
+ instances_by_doc_id[instance.doc_id].append(instance)
414
+ # Sort instances within each group
415
+ for instances in instances_by_doc_id.values():
416
+ instances.sort(key=lambda x: x.idx)
417
+ # iterate over different filters used
418
+ for filter_key in task.instances[0].filtered_resps.keys():
419
+ doc_iterator = task.doc_iterator(
420
+ rank=RANK, limit=limit, world_size=WORLD_SIZE
421
+ )
422
+ for doc_id, doc in doc_iterator:
423
+ requests = instances_by_doc_id[doc_id]
424
+ metrics = task.process_results(
425
+ doc, [req.filtered_resps[filter_key] for req in requests]
426
+ )
427
+ if log_samples:
428
+ target = task.doc_to_target(doc)
429
+ example = {
430
+ "doc_id": doc_id,
431
+ "doc": doc,
432
+ "target": target,
433
+ "arguments": [req.args for req in requests],
434
+ "resps": [req.resps for req in requests],
435
+ "filtered_resps": [
436
+ req.filtered_resps[filter_key] for req in requests
437
+ ],
438
+ }
439
+ example.update(metrics)
440
+ task_output.logged_samples.append(example)
441
+ for metric, value in metrics.items():
442
+ task_output.sample_metrics[(metric, filter_key)].append(value)
443
+
444
+ if WORLD_SIZE > 1:
445
+ # if multigpu, then gather data across all ranks to rank 0
446
+ # first gather logged samples across all ranks
447
+ for task_output in eval_tasks:
448
+ if log_samples:
449
+ # for task_name, task_samples in list(samples.items()):
450
+ full_samples = [None] * WORLD_SIZE
451
+ torch.distributed.all_gather_object(
452
+ obj=task_output.logged_samples,
453
+ object_list=full_samples,
454
+ )
455
+
456
+ if RANK == 0:
457
+ task_output.logged_samples = list(
458
+ itertools.chain.from_iterable(full_samples)
459
+ )
460
+
461
+ # then collect metrics across all ranks
462
+ for metrics in task_output.sample_metrics:
463
+ metric_list = [None] * WORLD_SIZE
464
+ torch.distributed.all_gather_object(
465
+ obj=task_output.sample_metrics[metrics],
466
+ object_list=metric_list,
467
+ )
468
+ if RANK == 0:
469
+ task_output.sample_metrics[metrics] = list(
470
+ itertools.chain.from_iterable(metric_list)
471
+ )
472
+
473
+ if RANK == 0:
474
+ ### Aggregate results over all datapoints ###
475
+ # aggregate results ; run bootstrap CIs
476
+ for task_output in eval_tasks:
477
+ task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
478
+ results, samples, configs, versions, num_fewshot = consolidate_results(
479
+ eval_tasks
480
+ )
481
+
482
+ ### Calculate group metrics ###
483
+ if bool(results):
484
+ for group, task_list in reversed(task_hierarchy.items()):
485
+ if len(task_list) == 0:
486
+ # task_hierarchy entries are either
487
+ # `group_name: [subtask1, subtask2, ...]`
488
+ # or `task_name: []`.
489
+ # we only want to operate on groups here.
490
+ continue
491
+ metric_list = list(
492
+ {
493
+ key
494
+ for task in task_list
495
+ for key in results[task].keys()
496
+ if "_stderr" not in key and key not in ["alias", "samples"]
497
+ }
498
+ )
499
+ for metric in metric_list:
500
+ stderr = "_stderr,".join(metric.split(","))
501
+
502
+ # gather metrics, sizes, and stderrs from subtasks
503
+ metrics = [
504
+ results[task][metric]
505
+ for task in task_list
506
+ if metric in results[task]
507
+ ] # TODO: copy?
508
+ stderrs = [
509
+ results[task][stderr]
510
+ for task in task_list
511
+ if stderr in results[task]
512
+ ]
513
+ sizes = [
514
+ results[task]["samples"]
515
+ for task in task_list
516
+ if metric in results[task]
517
+ ]
518
+
519
+ # compute group's pooled metric and stderr
520
+ results[group][
521
+ metric
522
+ ] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes)
523
+ # TODO: calculate grouped metric using aggregation fn
524
+ if "N/A" in stderrs:
525
+ results[group][stderr] = "N/A"
526
+ else:
527
+ results[group][
528
+ stderr
529
+ ] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes)
530
+ # TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
531
+ # To use the old (likely incorrect) variance formula, comment out the above and uncomment this line:
532
+ # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics)
533
+
534
+ results[group]["samples"] = sum(sizes)
535
+
536
+ results_agg = defaultdict(dict)
537
+ groups_agg = defaultdict(dict)
538
+ all_tasks_list = list(task_hierarchy.keys())
539
+ while True:
540
+ add_tasks_list = list(k for k in results_agg.keys())
541
+ left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list)))
542
+ if len(left_tasks_list) == 0:
543
+ break
544
+
545
+ _task_hierarchy = {
546
+ k: v for k, v in task_hierarchy.items() if k in left_tasks_list
547
+ }
548
+ _results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results)
549
+
550
+ results_agg = {**results_agg, **_results_agg}
551
+ groups_agg = {**groups_agg, **_groups_agg}
552
+
553
+ for group_name, task_list in task_hierarchy.items():
554
+ if task_list:
555
+ num_fewshot[group_name] = num_fewshot[
556
+ task_list[0]
557
+ ] # TODO: validate this
558
+
559
+ results_dict = {
560
+ "results": dict(results_agg.items()),
561
+ **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
562
+ "group_subtasks": dict(reversed(task_hierarchy.items())),
563
+ "configs": dict(sorted(configs.items())),
564
+ "versions": dict(sorted(versions.items())),
565
+ "n-shot": dict(sorted(num_fewshot.items())),
566
+ }
567
+ if log_samples:
568
+ results_dict["samples"] = dict(samples)
569
+
570
+ return results_dict
571
+
572
+ else:
573
+ return None
574
+
575
+
576
+ def request_caching_arg_to_dict(cache_requests: str) -> dict:
577
+ request_caching_args = {
578
+ "cache_requests": cache_requests in {"true", "refresh"},
579
+ "rewrite_requests_cache": cache_requests == "refresh",
580
+ "delete_requests_cache": cache_requests == "delete",
581
+ }
582
+
583
+ return request_caching_args
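
To complement the CLI above, a hedged sketch of calling `simple_evaluate` directly with keyword arguments matching the signature in this file (the checkpoint and task names are placeholders):

    # Keyword-argument style matches the intent of the @positional_deprecated decorator.
    from lm_eval.evaluator import simple_evaluate

    results = simple_evaluate(
        model="hf",
        model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
        tasks=["anli_r1"],                               # placeholder task list
        num_fewshot=0,
        batch_size=1,
        limit=10,           # small limit: testing only, per the docstring
        log_samples=False,
    )
    if results is not None:          # non-zero ranks return None
        print(results["results"])    # aggregated metrics per task
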
lm-evaluation/build/lib/lm_eval/evaluator_utils.py ADDED
@@ -0,0 +1,312 @@
1
+ import collections
2
+ import math
3
+ import pathlib
4
+ import sys
5
+ from typing import Dict, List, Optional, Tuple, Union
6
+
7
+ from lm_eval.api import metrics
8
+ from lm_eval.utils import eval_logger, positional_deprecated
9
+
10
+
11
+ class TaskOutput:
12
+ """
13
+ Wrapper class for Task outputs. It contains various attributes and methods to manage and calculate metrics for the task.
14
+
15
+ Attributes:
16
+ task (object): The task object.
17
+ task_name (str): The name of the task.
18
+ task_config (dict): The configuration of the task.
19
+ version (str): The version of the task.
20
+ group_name (str): The name of the task group.
21
+ n_shot (int): The number of shots for the task.
22
+ task_alias (str): The alias of the task.
23
+ group_alias (str): The alias of the task group.
24
+ is_group (bool): Indicates if the task is a group.
25
+ logged_samples (list): The list of logged samples.
26
+ sample_len (int): The length of the samples.
27
+ sample_metrics (defaultdict): The dictionary of samples' metrics.
28
+ agg_metrics (defaultdict): The dictionary of aggregate metrics.
29
+
30
+ Methods:
31
+ from_taskdict(cls, task_name: str, task):
32
+ Creates a TaskOutput instance from a task dictionary.
33
+
34
+ calculate_aggregate_metric(bootstrap_iters=100000) -> None:
35
+ Calculates the aggregate metrics for the task.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ task=None,
41
+ task_name=None,
42
+ task_config=None,
43
+ version=None,
44
+ group_name=None,
45
+ n_shot=None,
46
+ task_alias=None,
47
+ group_alias=None,
48
+ is_group=None,
49
+ ):
50
+ self.task = task
51
+ self.task_config = task_config
52
+ self.task_name = task_name
53
+ self.group_name = group_name
54
+ self.version = version
55
+ self.n_shot = n_shot
56
+ self.task_alias = task_alias
57
+ self.group_alias = group_alias
58
+ self.is_group = is_group
59
+ self.logged_samples = []
60
+ self.sample_len = None
61
+ self.sample_metrics = collections.defaultdict(list)
62
+ self.agg_metrics = collections.defaultdict(list)
63
+
64
+ @classmethod
65
+ def from_taskdict(cls, task_name: str, task):
66
+ if isinstance(task, tuple):
67
+ group_name, task = task
68
+ else:
69
+ group_name = None
70
+ if not task:
71
+ # these gets filtered out in get_task_list
72
+ # once they are added to group hierarchy
73
+ is_group = True
74
+ return cls(
75
+ task=task, task_name=task_name, is_group=is_group, group_name=group_name
76
+ )
77
+ version = task.VERSION
78
+ task_config = dict(task.dump_config())
79
+ if (n_shot := task_config.get("num_fewshot")) == 0:
80
+ n_shot = task_config.get("metadata", {}).get("num_fewshot", 0)
81
+ task_alias = task_config.get("alias")
82
+ group_alias = task_config.get("group_alias")
83
+ return cls(
84
+ task=task,
85
+ task_name=task_name,
86
+ task_config=task_config,
87
+ group_name=group_name,
88
+ version=version,
89
+ n_shot=n_shot,
90
+ task_alias=task_alias,
91
+ group_alias=group_alias,
92
+ )
93
+
94
+ def calculate_aggregate_metric(self, bootstrap_iters=100000) -> None:
95
+ for (metric, filter_key), items in self.sample_metrics.items():
96
+ agg_fn = self.task.aggregation()[metric]
97
+ metric_key = f"{metric},{filter_key}"
98
+ self.agg_metrics[metric_key] = agg_fn(items)
99
+ self.sample_len = len(items) # TODO: same sample size for each metric?
100
+ if bootstrap_iters:
101
+ stderr_fn = metrics.stderr_for_metric(
102
+ metric=agg_fn,
103
+ bootstrap_iters=min(bootstrap_iters, 100)
104
+ if metric in ["bleu", "chrf", "ter"]
105
+ else bootstrap_iters,
106
+ )
107
+ self.agg_metrics[f"{metric}_stderr,{filter_key}"] = (
108
+ stderr_fn(items) if (stderr_fn and len(items) > 1) else "N/A"
109
+ )
110
+
111
+ def __repr__(self):
112
+ return (
113
+ f"TaskOutput(task_name={self.task_name}, "
114
+ f"group_name={self.group_name}, "
115
+ f"version={self.version},"
116
+ f"n_shot={self.n_shot}"
117
+ f"task_alias={self.task_alias}, group_alias={self.group_alias})"
118
+ )
119
+
120
+
121
+ def get_task_list(task_dict: dict) -> Tuple[Dict[str, list], List[TaskOutput]]:
122
+ task_hierarchy = collections.defaultdict(list)
123
+ outputs = list(TaskOutput.from_taskdict(x, y) for x, y in task_dict.items())
124
+ for task_output in outputs:
125
+ if group_name := task_output.group_name:
126
+ task_hierarchy[group_name].append(task_output.task_name)
127
+ else:
128
+ task_hierarchy[task_output.task_name] = []
129
+ # returns task_hierarchy tracking which groups contain which subtasks,
130
+ # and a list of TaskOutput classes for each non-group subtask
131
+ return task_hierarchy, [x for x in outputs if x.task]
132
+
133
+
134
+ def print_writeout(task) -> None:
135
+ for inst in task.instances:
136
+ # print the prompt for the first few documents
137
+ if inst.doc_id < 1:
138
+ eval_logger.info(
139
+ f"Task: {task}; document {inst.doc_id}; context prompt (starting on next line):\
140
+ \n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)"
141
+ )
142
+ eval_logger.info(f"Request: {str(inst)}")
143
+
144
+
145
+ def get_sample_size(task, limit: Optional[int]) -> Union[int, None]:
146
+ if limit is not None:
147
+ limit = (
148
+ int(math.ceil(len(task.eval_docs) * limit)) if limit < 1.0 else int(limit)
149
+ )
150
+ return limit
151
+
152
+
153
+ def prepare_print_tasks(
154
+ task_hierarchy: dict, results: dict, tab=0
155
+ ) -> Tuple[dict, dict]:
156
+ """
157
+ @param task_hierarchy: Dictionary representing the group hierarchy of tasks. Each key is a group name and its
158
+ value is a list of task names.
159
+ @param results: Dictionary containing the results of each task. Each key is a
160
+ group name and its value is a dictionary of task results.
161
+ @param tab: The indentation level for printing the task
162
+ hierarchy. Default is 0.
163
+ @return: A tuple of two dictionaries: results_agg and groups_agg. results_agg contains
164
+ aggregated results for each task, and groups_agg contains aggregated results for each group.
165
+
166
+ Prepares the task hierarchy and aggregates the results for each task and group recursively for printing.
167
+ """
168
+ results_agg = collections.defaultdict(dict)
169
+ groups_agg = collections.defaultdict(dict)
170
+
171
+ (group_name, task_list), *_ = task_hierarchy.items()
172
+ task_list = sorted(task_list)
173
+
174
+ results_agg[group_name] = results[group_name].copy()
175
+ # results_agg[group_name]["tab"] = tab
176
+ if "samples" in results_agg[group_name]:
177
+ results_agg[group_name].pop("samples")
178
+
179
+ tab_string = " " * tab + "- " if tab > 0 else ""
180
+
181
+ if "alias" in results_agg[group_name]:
182
+ results_agg[group_name]["alias"] = tab_string + results_agg[group_name]["alias"]
183
+ else:
184
+ results_agg[group_name]["alias"] = tab_string + group_name
185
+
186
+ if len(task_list) > 0:
187
+ groups_agg[group_name] = results[group_name].copy()
188
+ # groups_agg[group_name]["tab"] = tab
189
+ if "samples" in groups_agg[group_name]:
190
+ groups_agg[group_name].pop("samples")
191
+
192
+ if "alias" in groups_agg[group_name]:
193
+ groups_agg[group_name]["alias"] = (
194
+ tab_string + groups_agg[group_name]["alias"]
195
+ )
196
+ else:
197
+ groups_agg[group_name]["alias"] = tab_string + group_name
198
+
199
+ for task_name in task_list:
200
+ if task_name in task_hierarchy:
201
+ _task_hierarchy = {
202
+ **{task_name: task_hierarchy[task_name]},
203
+ **task_hierarchy,
204
+ }
205
+ else:
206
+ _task_hierarchy = {
207
+ **{task_name: []},
208
+ **task_hierarchy,
209
+ }
210
+
211
+ _results_agg, _groups_agg = prepare_print_tasks(
212
+ _task_hierarchy, results, tab + 1
213
+ )
214
+ results_agg = {**results_agg, **_results_agg}
215
+ groups_agg = {**groups_agg, **_groups_agg}
216
+
217
+ return results_agg, groups_agg
218
+
219
+
220
+ def consolidate_results(
221
+ eval_tasks: List[TaskOutput],
222
+ ) -> Tuple[dict, dict, dict, dict, dict]:
223
+ """
224
+ @param eval_tasks: list(TaskOutput).
225
+ @return: A tuple containing the consolidated results, samples, configs, versions, and num_fewshot.
226
+
227
+ Consolidates the results of multiple evaluation tasks into a single structure.
228
+
229
+ The method iterates over each evaluation instance and extracts relevant information to create the consolidated
230
+ results structure. The consolidated results structure has the following properties:
231
+
232
+ - results: A defaultdict with task names as keys and dictionaries as values. Each dictionary contains
233
+ metric/filter pairs as keys and corresponding metric values as values. The "alias" key is used to store task
234
+ aliases specified in the task configuration.
235
+ - samples: A defaultdict with task names as keys and lists of log samples as values.
236
+ - configs: A defaultdict with task names as keys and task configurations as values.
237
+ - versions: A defaultdict with task names as keys and task versions as values.
238
+ - num_fewshot: A defaultdict with task names as keys and number of few-shot samples as values.
239
+
240
+ The method then returns the consolidated results, samples, configs, versions, and num_fewshot as a tuple.
241
+ """
242
+ # stores the final result for each task, for each metric/filter pair.
243
+ results = collections.defaultdict(dict)
244
+ # logs info about each document evaluated.
245
+ samples = collections.defaultdict(list)
246
+ # store num-fewshot value per task
247
+ num_fewshot = collections.defaultdict(int)
248
+ # Tracks the YAML configs of all chosen task
249
+ configs = collections.defaultdict(dict)
250
+ # Tracks each task's version.
251
+ versions = collections.defaultdict(dict)
252
+ for task_output in eval_tasks:
253
+ if "task_alias" in (task_config := task_output.task_config):
254
+ results[task_output.task_name]["alias"] = task_config["task_alias"]
255
+ if group_alias := task_output.group_alias:
256
+ if group_alias not in results and (group_name := task_output.group_name):
257
+ results[group_name]["alias"] = group_alias
258
+ num_fewshot[task_output.task_name] = task_output.n_shot
259
+ configs[task_output.task_name] = task_output.task_config
260
+ versions[task_output.task_name] = task_output.version
261
+ samples[task_output.task_name] = task_output.logged_samples
262
+ for (metric, filter_key), items in task_output.sample_metrics.items():
263
+ metric_key = f"{metric},{filter_key}"
264
+ results[task_output.task_name][metric_key] = task_output.agg_metrics[
265
+ metric_key
266
+ ]
267
+ results[task_output.task_name]["samples"] = task_output.sample_len
268
+ results[task_output.task_name][
269
+ f"{metric}_stderr,{filter_key}"
270
+ ] = task_output.agg_metrics[f"{metric}_stderr,{filter_key}"]
271
+ return results, samples, configs, versions, num_fewshot
272
+
273
+
274
+ @positional_deprecated
275
+ def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
276
+ """
277
+ Search upward in the directory tree to a maximum of three layers
278
+ to find and return the package root (containing the 'tests' folder)
279
+ """
280
+ cur_path = start_path.resolve()
281
+ max_layers = 3
282
+ for _ in range(max_layers):
283
+ if (cur_path / "tests" / "test_version_stable.py").exists():
284
+ return cur_path
285
+ else:
286
+ cur_path = cur_path.parent.resolve()
287
+ raise FileNotFoundError(
288
+ f"Unable to find package root within {max_layers} upwards " + f"of {start_path}"
289
+ )
290
+
291
+
292
+ @positional_deprecated
293
+ def run_task_tests(task_list: List[str]):
294
+ """
295
+ Find the package root and run the tests for the given tasks
296
+ """
297
+ import pytest
298
+
299
+ package_root = find_test_root(start_path=pathlib.Path(__file__))
300
+ task_string = " or ".join(task_list)
301
+ args = [
302
+ f"{package_root}/tests/test_version_stable.py",
303
+ f"--rootdir={package_root}",
304
+ "-k",
305
+ f"{task_string}",
306
+ ]
307
+ sys.path.append(str(package_root))
308
+ pytest_return_val = pytest.main(args)
309
+ if pytest_return_val:
310
+ raise ValueError(
311
+ f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
312
+ )
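
A tiny illustration of how `get_sample_size` above interprets the limit value (the `DummyTask` class is hypothetical and exists only to supply `eval_docs`):

    # get_sample_size: fractional limits become a ceiling of a percentage; integers pass through.
    from lm_eval.evaluator_utils import get_sample_size

    class DummyTask:
        eval_docs = list(range(200))

    print(get_sample_size(DummyTask(), None))   # None -> None (no limit)
    print(get_sample_size(DummyTask(), 0.1))    # 0.1  -> 20 (ceil of 10% of 200 docs)
    print(get_sample_size(DummyTask(), 50))     # 50   -> 50
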
lm-evaluation/build/lib/lm_eval/logging_utils.py ADDED
@@ -0,0 +1,455 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import os
5
+ import re
6
+ import subprocess
7
+ from pathlib import Path
8
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
9
+
10
+ import numpy as np
11
+ import pandas as pd
12
+ from packaging.version import Version
13
+ from torch.utils.collect_env import get_pretty_env_info
14
+ from transformers import __version__ as trans_version
15
+
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ def remove_none_pattern(input_string: str) -> Tuple[str, bool]:
21
+ """Remove the ',none' substring from the input_string if it exists at the end.
22
+
23
+ Args:
24
+ input_string (str): The input string from which to remove the ',none' substring.
25
+
26
+ Returns:
27
+ Tuple[str, bool]: A tuple containing the modified input_string with the ',none' substring removed
28
+ and a boolean indicating whether the modification was made (True) or not (False).
29
+ """
30
+ # Define the pattern to match ',none' at the end of the string
31
+ pattern = re.compile(r",none$")
32
+
33
+ # Use sub() to replace ',none' with an empty string
34
+ result = re.sub(pattern, "", input_string)
35
+
36
+ # check if the input_string changed
37
+ removed = result != input_string
38
+
39
+ return result, removed
40
+
41
+
42
+ def _handle_non_serializable(o: Any) -> Union[int, str, list]:
43
+ """Handle non-serializable objects by converting them to serializable types.
44
+
45
+ Args:
46
+ o (Any): The object to be handled.
47
+
48
+ Returns:
49
+ Union[int, str, list]: The converted object. If the object is of type np.int64 or np.int32,
50
+ it will be converted to int. If the object is of type set, it will be converted
51
+ to a list. Otherwise, it will be converted to str.
52
+ """
53
+ if isinstance(o, np.int64) or isinstance(o, np.int32):
54
+ return int(o)
55
+ elif isinstance(o, set):
56
+ return list(o)
57
+ else:
58
+ return str(o)
59
+
60
+
61
+ def get_wandb_printer() -> Literal["Printer"]:
62
+ """Returns a wandb printer instance for pretty stdout."""
63
+ from wandb.sdk.lib.printer import get_printer
64
+ from wandb.sdk.wandb_settings import Settings
65
+
66
+ printer = get_printer(Settings()._jupyter)
67
+ return printer
68
+
69
+
70
+ class WandbLogger:
71
+ def __init__(self, **kwargs) -> None:
72
+ """Attaches to wandb logger if already initialized. Otherwise, passes kwargs to wandb.init()
73
+
74
+ Args:
75
+ kwargs Optional[Any]: Arguments for configuration.
76
+
77
+ Parse and log the results returned from evaluator.simple_evaluate() with:
78
+ wandb_logger.post_init(results)
79
+ wandb_logger.log_eval_result()
80
+ wandb_logger.log_eval_samples(results["samples"])
81
+ """
82
+ try:
83
+ import wandb
84
+
85
+ assert Version(wandb.__version__) >= Version("0.13.6")
86
+ if Version(wandb.__version__) < Version("0.13.6"):
87
+ wandb.require("report-editing:v0")
88
+ except Exception as e:
89
+ logger.warning(
90
+ "To use the wandb reporting functionality please install wandb>=0.13.6.\n"
91
+ "To install the latest version of wandb run `pip install wandb --upgrade`\n"
92
+ f"{e}"
93
+ )
94
+
95
+ self.wandb_args: Dict[str, Any] = kwargs
96
+
97
+ # initialize a W&B run
98
+ if wandb.run is None:
99
+ self.run = wandb.init(**self.wandb_args)
100
+ else:
101
+ self.run = wandb.run
102
+
103
+ self.printer = get_wandb_printer()
104
+
105
+ def post_init(self, results: Dict[str, Any]) -> None:
106
+ self.results: Dict[str, Any] = copy.deepcopy(results)
107
+ self.task_names: List[str] = list(results.get("results", {}).keys())
108
+ self.group_names: List[str] = list(results.get("groups", {}).keys())
109
+
110
+ def _get_config(self) -> Dict[str, Any]:
111
+ """Get configuration parameters."""
112
+ self.task_configs = self.results.get("configs", {})
113
+ cli_configs = self.results.get("config", {})
114
+ configs = {
115
+ "task_configs": self.task_configs,
116
+ "cli_configs": cli_configs,
117
+ }
118
+
119
+ return configs
120
+
121
+ def _sanitize_results_dict(self) -> Tuple[Dict[str, str], Dict[str, Any]]:
122
+ """Sanitize the results dictionary."""
123
+ _results = copy.deepcopy(self.results.get("results", dict()))
124
+
125
+ # Remove None from the metric string name
126
+ tmp_results = copy.deepcopy(_results)
127
+ for task_name in self.task_names:
128
+ task_result = tmp_results.get(task_name, dict())
129
+ for metric_name, metric_value in task_result.items():
130
+ _metric_name, removed = remove_none_pattern(metric_name)
131
+ if removed:
132
+ _results[task_name][_metric_name] = metric_value
133
+ _results[task_name].pop(metric_name)
134
+
135
+ # remove string valued keys from the results dict
136
+ wandb_summary = {}
137
+ for task in self.task_names:
138
+ task_result = _results.get(task, dict())
139
+ for metric_name, metric_value in task_result.items():
140
+ if isinstance(metric_value, str):
141
+ wandb_summary[f"{task}/{metric_name}"] = metric_value
142
+
143
+ for summary_metric, summary_value in wandb_summary.items():
144
+ _task, _summary_metric = summary_metric.split("/")
145
+ _results[_task].pop(_summary_metric)
146
+
147
+ tmp_results = copy.deepcopy(_results)
148
+ for task_name, task_results in tmp_results.items():
149
+ for metric_name, metric_value in task_results.items():
150
+ _results[f"{task_name}/{metric_name}"] = metric_value
151
+ _results[task_name].pop(metric_name)
152
+ for task in self.task_names:
153
+ _results.pop(task)
154
+
155
+ return wandb_summary, _results
156
+
157
+ def _log_results_as_table(self) -> None:
158
+ """Generate and log evaluation results as a table to W&B."""
159
+ columns = [
160
+ "Version",
161
+ "Filter",
162
+ "num_fewshot",
163
+ "Metric",
164
+ "Value",
165
+ "Stderr",
166
+ ]
167
+
168
+ def make_table(columns: List[str], key: str = "results"):
169
+ import wandb
170
+
171
+ table = wandb.Table(columns=columns)
172
+ results = copy.deepcopy(self.results)
173
+
174
+ for k, dic in results.get(key).items():
175
+ if k in self.group_names and not key == "groups":
176
+ continue
177
+ version = results.get("versions").get(k)
178
+ if version == "N/A":
179
+ version = None
180
+ n = results.get("n-shot").get(k)
181
+
182
+ for (mf), v in dic.items():
183
+ m, _, f = mf.partition(",")
184
+ if m.endswith("_stderr"):
185
+ continue
186
+ if m == "alias":
187
+ continue
188
+
189
+ if m + "_stderr" + "," + f in dic:
190
+ se = dic[m + "_stderr" + "," + f]
191
+ if se != "N/A":
192
+ se = "%.4f" % se
193
+ table.add_data(*[k, version, f, n, m, str(v), str(se)])
194
+ else:
195
+ table.add_data(*[k, version, f, n, m, str(v), ""])
196
+
197
+ return table
198
+
199
+ # log the complete eval result to W&B Table
200
+ table = make_table(["Tasks"] + columns, "results")
201
+ self.run.log({"evaluation/eval_results": table})
202
+
203
+ if "groups" in self.results.keys():
204
+ table = make_table(["Groups"] + columns, "groups")
205
+ self.run.log({"evaluation/group_eval_results": table})
206
+
207
+ def _log_results_as_artifact(self) -> None:
208
+ """Log results as JSON artifact to W&B."""
209
+ import wandb
210
+
211
+ dumped = json.dumps(
212
+ self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False
213
+ )
214
+ artifact = wandb.Artifact("results", type="eval_results")
215
+ with artifact.new_file("results.json", mode="w", encoding="utf-8") as f:
216
+ f.write(dumped)
217
+ self.run.log_artifact(artifact)
218
+
219
+ def log_eval_result(self) -> None:
220
+ """Log evaluation results to W&B."""
221
+ # Log configs to wandb
222
+ configs = self._get_config()
223
+ self.run.config.update(configs)
224
+
225
+ wandb_summary, self.wandb_results = self._sanitize_results_dict()
226
+ # update wandb.run.summary with items that were removed
227
+ self.run.summary.update(wandb_summary)
228
+ # Log the evaluation metrics to wandb
229
+ self.run.log(self.wandb_results)
230
+ # Log the evaluation metrics as W&B Table
231
+ self._log_results_as_table()
232
+ # Log the results dict as json to W&B Artifacts
233
+ self._log_results_as_artifact()
234
+
235
+ def _generate_dataset(
236
+ self, data: List[Dict[str, Any]], config: Dict[str, Any]
237
+ ) -> pd.DataFrame:
238
+ """Generate a dataset from evaluation data.
239
+
240
+ Args:
241
+ data (List[Dict[str, Any]]): The data to generate a dataset for.
242
+ config (Dict[str, Any]): The configuration of the task.
243
+
244
+ Returns:
245
+ pd.DataFrame: A dataframe that is ready to be uploaded to W&B.
246
+ """
247
+ ids = [x["doc_id"] for x in data]
248
+ labels = [x["target"] for x in data]
249
+ instance = [""] * len(ids)
250
+ resps = [""] * len(ids)
251
+ filtered_resps = [""] * len(ids)
252
+ model_outputs = {}
253
+
254
+ metrics_list = config["metric_list"]
255
+ metrics = {}
256
+ for metric in metrics_list:
257
+ metric = metric.get("metric")
258
+ if metric in ["word_perplexity", "byte_perplexity", "bits_per_byte"]:
259
+ metrics[f"{metric}_loglikelihood"] = [x[metric][0] for x in data]
260
+ if metric in ["byte_perplexity", "bits_per_byte"]:
261
+ metrics[f"{metric}_bytes"] = [x[metric][1] for x in data]
262
+ else:
263
+ metrics[f"{metric}_words"] = [x[metric][1] for x in data]
264
+ else:
265
+ metrics[metric] = [x[metric] for x in data]
266
+
267
+ if config["output_type"] == "loglikelihood":
268
+ instance = [x["arguments"][0][0] for x in data]
269
+ labels = [x["arguments"][0][1] for x in data]
270
+ resps = [
271
+ f'log probability of continuation is {x["resps"][0][0][0]} '
272
+ + "\n\n"
273
+ + "continuation will {} generated with greedy sampling".format(
274
+ "not be" if not x["resps"][0][0][1] else "be"
275
+ )
276
+ for x in data
277
+ ]
278
+ filtered_resps = [
279
+ f'log probability of continuation is {x["filtered_resps"][0][0]} '
280
+ + "\n\n"
281
+ + "continuation will {} generated with greedy sampling".format(
282
+ "not be" if not x["filtered_resps"][0][1] else "be"
283
+ )
284
+ for x in data
285
+ ]
286
+ elif config["output_type"] == "multiple_choice":
287
+ instance = [x["arguments"][0][0] for x in data]
288
+ choices = [
289
+ "\n".join([f"{idx}. {y[1]}" for idx, y in enumerate(x["arguments"])])
290
+ for x in data
291
+ ]
292
+ resps = [np.argmax([n[0][0] for n in x["resps"]]) for x in data]
293
+ filtered_resps = [
294
+ np.argmax([n[0] for n in x["filtered_resps"]]) for x in data
295
+ ]
296
+ elif config["output_type"] == "loglikelihood_rolling":
297
+ instance = [x["arguments"][0][0] for x in data]
298
+ resps = [x["resps"][0][0] for x in data]
299
+ filtered_resps = [x["filtered_resps"][0] for x in data]
300
+ elif config["output_type"] == "generate_until":
301
+ instance = [x["arguments"][0][0] for x in data]
302
+ resps = [x["resps"][0][0] for x in data]
303
+ filtered_resps = [x["filtered_resps"][0] for x in data]
304
+
305
+ model_outputs["raw_predictions"] = resps
306
+ model_outputs["filtered_predictions"] = filtered_resps
307
+
308
+ df_data = {
309
+ "id": ids,
310
+ "data": instance,
311
+ }
312
+ if config["output_type"] == "multiple_choice":
313
+ df_data["choices"] = choices
314
+
315
+ tmp_data = {
316
+ "input_len": [len(x) for x in instance],
317
+ "labels": labels,
318
+ "output_type": config["output_type"],
319
+ }
320
+ df_data.update(tmp_data)
321
+ df_data.update(model_outputs)
322
+ df_data.update(metrics)
323
+
324
+ return pd.DataFrame(df_data)
325
+
326
+ def _log_samples_as_artifact(
327
+ self, data: List[Dict[str, Any]], task_name: str
328
+ ) -> None:
329
+ import wandb
330
+
331
+ # log the samples as an artifact
332
+ dumped = json.dumps(
333
+ data,
334
+ indent=2,
335
+ default=_handle_non_serializable,
336
+ ensure_ascii=False,
337
+ )
338
+ artifact = wandb.Artifact(f"{task_name}", type="samples_by_task")
339
+ with artifact.new_file(
340
+ f"{task_name}_eval_samples.json", mode="w", encoding="utf-8"
341
+ ) as f:
342
+ f.write(dumped)
343
+ self.run.log_artifact(artifact)
344
+ # artifact.wait()
345
+
346
+ def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None:
347
+ """Log evaluation samples to W&B.
348
+
349
+ Args:
350
+ samples (Dict[str, List[Dict[str, Any]]]): Evaluation samples for each task.
351
+ """
352
+ task_names: List[str] = [
353
+ x for x in self.task_names if x not in self.group_names
354
+ ]
355
+
356
+ ungrouped_tasks = []
357
+ tasks_by_groups = {}
358
+
359
+ for task_name in task_names:
360
+ group_names = self.task_configs[task_name].get("group", None)
361
+ if group_names:
362
+ if isinstance(group_names, str):
363
+ group_names = [group_names]
364
+
365
+ for group_name in group_names:
366
+ if not tasks_by_groups.get(group_name):
367
+ tasks_by_groups[group_name] = [task_name]
368
+ else:
369
+ tasks_by_groups[group_name].append(task_name)
370
+ else:
371
+ ungrouped_tasks.append(task_name)
372
+
373
+ for task_name in ungrouped_tasks:
374
+ eval_preds = samples[task_name]
375
+
376
+ # log the samples as a W&B Table
377
+ df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
378
+ self.run.log({f"{task_name}_eval_results": df})
379
+
380
+ # log the samples as a json file as W&B Artifact
381
+ self._log_samples_as_artifact(eval_preds, task_name)
382
+
383
+ for group, grouped_tasks in tasks_by_groups.items():
384
+ grouped_df = pd.DataFrame()
385
+ for task_name in grouped_tasks:
386
+ eval_preds = samples[task_name]
387
+ df = self._generate_dataset(
388
+ eval_preds, self.task_configs.get(task_name)
389
+ )
390
+ df["group"] = group
391
+ df["task"] = task_name
392
+ grouped_df = pd.concat([grouped_df, df], ignore_index=True)
393
+
394
+ # log the samples as a json file as W&B Artifact
395
+ self._log_samples_as_artifact(eval_preds, task_name)
396
+
397
+ self.run.log({f"{group}_eval_results": grouped_df})
398
+
399
+
400
+ def get_commit_from_path(repo_path: Union[Path, str]) -> Optional[str]:
401
+ try:
402
+ git_folder = Path(repo_path, ".git")
403
+ if git_folder.is_file():
404
+ git_folder = Path(
405
+ git_folder.parent,
406
+ git_folder.read_text(encoding="utf-8").split("\n")[0].split(" ")[-1],
407
+ )
408
+ if Path(git_folder, "HEAD").exists():
409
+ head_name = (
410
+ Path(git_folder, "HEAD")
411
+ .read_text(encoding="utf-8")
412
+ .split("\n")[0]
413
+ .split(" ")[-1]
414
+ )
415
+ head_ref = Path(git_folder, head_name)
416
+ git_hash = head_ref.read_text(encoding="utf-8").replace("\n", "")
417
+ else:
418
+ git_hash = None
419
+ except Exception as err:
420
+ logger.debug(
421
+ f"Failed to retrieve a Git commit hash from path: {str(repo_path)}. Error: {err}"
422
+ )
423
+ return None
424
+ return git_hash
425
+
426
+
427
+ def get_git_commit_hash():
428
+ """
429
+ Gets the git commit hash of your current repo (if it exists).
430
+ Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42
431
+ """
432
+ try:
433
+ git_hash = subprocess.check_output(["git", "describe", "--always"]).strip()
434
+ git_hash = git_hash.decode()
435
+ except (subprocess.CalledProcessError, FileNotFoundError):
436
+ # FileNotFoundError occurs when git not installed on system
437
+ git_hash = get_commit_from_path(os.getcwd()) # git hash of repo if exists
438
+ return git_hash
439
+
440
+
441
+ def add_env_info(storage: Dict[str, Any]):
442
+ try:
443
+ pretty_env_info = get_pretty_env_info()
444
+ except Exception as err:
445
+ pretty_env_info = str(err)
446
+ transformers_version = trans_version
447
+ upper_dir_commit = get_commit_from_path(
448
+ Path(os.getcwd(), "..")
449
+ ) # git hash of upper repo if exists
450
+ added_info = {
451
+ "pretty_env_info": pretty_env_info,
452
+ "transformers_version": transformers_version,
453
+ "upper_git_hash": upper_dir_commit, # in case this repo is submodule
454
+ }
455
+ storage.update(added_info)
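The `WandbLogger` docstring above spells out the intended call order: `post_init`, then `log_eval_result`, then `log_eval_samples`. A minimal usage sketch, assuming `wandb` is installed and that this package's `simple_evaluate` entry point is used to produce `results` (the model, task, and W&B project names below are placeholders):

```python
from lm_eval import simple_evaluate
from lm_eval.logging_utils import WandbLogger

# Produce the results dict that WandbLogger expects.
results = simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
    tasks=["anli_r1"],
)

# Any kwargs are forwarded to wandb.init(); project/job_type are illustrative.
wandb_logger = WandbLogger(project="lm-eval-harness", job_type="eval")
wandb_logger.post_init(results)
wandb_logger.log_eval_result()
wandb_logger.log_eval_samples(results["samples"])  # present when per-sample logging is enabled
```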
lm-evaluation/build/lib/lm_eval/tasks/__init__.py ADDED
@@ -0,0 +1,446 @@
1
+ import collections
2
+ import logging
3
+ import os
4
+ from functools import partial
5
+ from typing import Dict, List, Mapping, Optional, Union
6
+
7
+ from lm_eval import utils
8
+ from lm_eval.api.task import ConfigurableTask, Task
9
+
10
+
11
+ class TaskManager:
12
+ """TaskManager indexes all tasks from the default `lm_eval/tasks/`
13
+ and an optional directory if provided.
14
+
15
+ """
16
+
17
+ def __init__(self, verbosity="INFO", include_path: Optional[str] = None) -> None:
18
+ self.verbosity = verbosity
19
+ self.include_path = include_path
20
+ self.logger = utils.eval_logger
21
+ self.logger.setLevel(getattr(logging, f"{verbosity}"))
22
+
23
+ self._task_index = self.initialize_tasks(include_path=include_path)
24
+ self._all_tasks = sorted(list(self._task_index.keys()))
25
+
26
+ self.task_group_map = collections.defaultdict(list)
27
+
28
+ def initialize_tasks(self, include_path: Optional[str] = None):
29
+ """Creates a dictionary index of tasks.
30
+
31
+ :param include_path: str = None
32
+ An additional path to be searched for tasks
33
+
34
+ :return
35
+ Dictionary with task names as keys and task metadata as values
36
+ """
37
+ all_paths = [os.path.dirname(os.path.abspath(__file__)) + "/"]
38
+ if include_path is not None:
39
+ if isinstance(include_path, str):
40
+ include_path = [include_path]
41
+ all_paths.extend(include_path)
42
+
43
+ task_index = {}
44
+ for task_dir in all_paths:
45
+ tasks = self._get_task_and_group(task_dir)
46
+ task_index = {**tasks, **task_index}
47
+
48
+ return task_index
49
+
50
+ @property
51
+ def all_tasks(self):
52
+ return self._all_tasks
53
+
54
+ @property
55
+ def task_index(self):
56
+ return self._task_index
57
+
58
+ def match_tasks(self, task_list):
59
+ return utils.pattern_match(task_list, self.all_tasks)
60
+
61
+ def _name_is_registered(self, name) -> bool:
62
+ if name in self.all_tasks:
63
+ return True
64
+ return False
65
+
66
+ def _name_is_task(self, name) -> bool:
67
+ if self._name_is_registered(name) and ("task" in self.task_index[name]["type"]):
68
+ return True
69
+ return False
70
+
71
+ def _name_is_group(self, name) -> bool:
72
+ if self._name_is_registered(name) and (
73
+ self.task_index[name]["type"] == "group"
74
+ ):
75
+ return True
76
+ return False
77
+
78
+ def _name_is_python_task(self, name):
79
+ if self._name_is_registered(name) and (
80
+ self.task_index[name]["type"] == "python_task"
81
+ ):
82
+ return True
83
+ return False
84
+
85
+ def _config_is_task(self, config) -> bool:
86
+ if ("task" in config) and isinstance(config["task"], str):
87
+ return True
88
+ return False
89
+
90
+ def _config_is_group(self, config) -> bool:
91
+ if ("task" in config) and isinstance(config["task"], list):
92
+ return True
93
+ return False
94
+
95
+ def _config_is_python_task(self, config) -> bool:
96
+ if "class" in config:
97
+ return True
98
+ return False
99
+
100
+ def _get_yaml_path(self, name):
101
+ if name not in self.task_index:
102
+ raise ValueError
103
+ return self.task_index[name]["yaml_path"]
104
+
105
+ def _get_config(self, name):
106
+ if name not in self.task_index:
107
+ raise ValueError
108
+ yaml_path = self._get_yaml_path(name)
109
+ if yaml_path == -1:
110
+ return {}
111
+ else:
112
+ return utils.load_yaml_config(yaml_path, mode="full")
113
+
114
+ def _get_tasklist(self, name):
115
+ if self._name_is_task(name):
116
+ raise ValueError
117
+ return self.task_index[name]["task"]
118
+
119
+ def _process_alias(self, config, group=None):
120
+ # If the group is not the same as the original
121
+ # group which the group alias was intended for,
122
+ # Set the group_alias to None instead.
123
+ if ("group_alias" in config) and ("group" in config) and group is not None:
124
+ if config["group"] != group:
125
+ config["group_alias"] = None
126
+ return config
127
+
128
+ def _load_individual_task_or_group(
129
+ self,
130
+ name_or_config: Optional[Union[str, dict]] = None,
131
+ parent_name: Optional[str] = None,
132
+ update_config: Optional[dict] = None,
133
+ yaml_path: Optional[str] = None,
134
+ ) -> Mapping:
135
+ def load_task(config, task, group=None, yaml_path=None):
136
+ if "include" in config:
137
+ if yaml_path is None:
138
+ raise ValueError
139
+ config.update(
140
+ utils.load_yaml_config(
141
+ yaml_path,
142
+ yaml_config={"include": config.pop("include")},
143
+ mode="full",
144
+ )
145
+ )
146
+ if self._config_is_python_task(config):
147
+ task_object = config["class"]()
148
+ else:
149
+ config = self._process_alias(config, group=group)
150
+ task_object = ConfigurableTask(config=config)
151
+ if group is not None:
152
+ task_object = (group, task_object)
153
+ return {task: task_object}
154
+
155
+ if isinstance(name_or_config, str):
156
+ if update_config is not None:
157
+ # Process name_or_config as a dict instead
158
+ name_or_config = {"task": name_or_config, **update_config}
159
+ elif self._name_is_task(name_or_config):
160
+ task_config = self._get_config(name_or_config)
161
+ return load_task(task_config, task=name_or_config, group=parent_name)
162
+ else:
163
+ group_name = name_or_config
164
+ subtask_list = self._get_tasklist(name_or_config)
165
+ if subtask_list == -1:
166
+ group_config = self._get_config(name_or_config)
167
+ subtask_list = group_config["task"]
168
+
169
+ # This checks if we're at the root.
170
+ if parent_name is None:
171
+ group_config = self._get_config(name_or_config)
172
+ if set(group_config.keys()) > {"task", "group"}:
173
+ update_config = {
174
+ k: v
175
+ for k, v in group_config.items()
176
+ if k not in ["task", "group"]
177
+ }
178
+ yaml_path = self._get_yaml_path(group_name)
179
+
180
+ if (update_config is not None) and ("group_alias" in update_config):
181
+ group_name = update_config["group_alias"]
182
+ update_config.pop("group_alias")
183
+
184
+ if isinstance(name_or_config, dict):
185
+ if update_config is not None:
186
+ name_or_config = {
187
+ **name_or_config,
188
+ **update_config,
189
+ }
190
+
191
+ if self._config_is_task(name_or_config):
192
+ name = name_or_config["task"]
193
+ # If the name is registered as a group
194
+ # if self._name_is_task(name) is False:
195
+ if self._name_is_group(name):
196
+ group_name = name
197
+ update_config = {
198
+ k: v for k, v in name_or_config.items() if k != "task"
199
+ }
200
+ subtask_list = self._get_tasklist(name)
201
+ if subtask_list == -1:
202
+ subtask_list = self._get_config(name)["task"]
203
+ else:
204
+ if self._name_is_registered(name):
205
+ base_task_config = self._get_config(name)
206
+
207
+ # Check if this is a duplicate.
208
+ if parent_name is not None:
209
+ name_or_config["group"] = parent_name
210
+ num_duplicate = len(
211
+ list(
212
+ filter(
213
+ lambda x: x.startswith(name),
214
+ self.task_group_map[parent_name],
215
+ )
216
+ )
217
+ )
218
+ if num_duplicate > 0:
219
+ name = f"{name}-{num_duplicate}"
220
+ self.task_group_map[parent_name].append(name)
221
+
222
+ task_config = {
223
+ **base_task_config,
224
+ **name_or_config,
225
+ }
226
+ else:
227
+ task_config = name_or_config
228
+ return load_task(
229
+ task_config, task=name, group=parent_name, yaml_path=yaml_path
230
+ )
231
+ else:
232
+ group_name = name_or_config["group"]
233
+ subtask_list = name_or_config["task"]
234
+ if set(name_or_config.keys()) > {"task", "group"}:
235
+ update_config = {
236
+ k: v
237
+ for k, v in name_or_config.items()
238
+ if k not in ["task", "group"]
239
+ }
240
+
241
+ all_subtasks = {}
242
+ if parent_name is not None:
243
+ all_subtasks = {group_name: (parent_name, None)}
244
+
245
+ fn = partial(
246
+ self._load_individual_task_or_group,
247
+ parent_name=group_name,
248
+ update_config=update_config,
249
+ yaml_path=yaml_path,
250
+ )
251
+ all_subtasks = {
252
+ **all_subtasks,
253
+ **dict(collections.ChainMap(*map(fn, subtask_list))),
254
+ }
255
+ return all_subtasks
256
+
257
+ def load_task_or_group(self, task_list: Optional[Union[str, list]] = None) -> dict:
258
+ """Loads a dictionary of task objects from a list
259
+
260
+ :param task_list: Union[str, list] = None
261
+ Single string or list of string of task names to be loaded
262
+
263
+ :return
264
+ Dictionary of task objects
265
+ """
266
+ if isinstance(task_list, str):
267
+ task_list = [task_list]
268
+
269
+ all_loaded_tasks = dict(
270
+ collections.ChainMap(*map(self._load_individual_task_or_group, task_list))
271
+ )
272
+ return all_loaded_tasks
273
+
274
+ def load_config(self, config: Dict):
275
+ return self._load_individual_task_or_group(config)
276
+
277
+ def _get_task_and_group(self, task_dir: str):
278
+ """Creates a dictionary index of tasks with the following metadata:
279
+ - `type`, that can be either `task`, `python_task`, or `group`.
280
+ `task` refer to regular task configs, `python_task` are special
281
+ yaml files that only consists of `task` and `class` parameters.
282
+ `group` are group configs.
283
+ - `yaml_path`, path to the yaml file. If the entry is a `group` that
284
+ was configured through a task config, the yaml_path will be -1
285
+ and all subtasks will be listed in `task` (see below)
286
+ - `task`, reserved for entries with `type` as `group`. This will list
287
+ all subtasks. When a group config is created (as opposed to task
288
+ config having `group` parameter set), this will be set to -1 to
289
+ avoid recursive indexing. The whole list of subtasks will be loaded
290
+ at evaluation.
291
+
292
+ :param task_dir: str
293
+ A directory to check for tasks
294
+
295
+ :return
296
+ Dictionary with task names as keys and task metadata as values
297
+ """
298
+ tasks_and_groups = collections.defaultdict()
299
+ for root, _, file_list in os.walk(task_dir):
300
+ for f in file_list:
301
+ if f.endswith(".yaml"):
302
+ yaml_path = os.path.join(root, f)
303
+ config = utils.load_yaml_config(yaml_path, mode="simple")
304
+ if self._config_is_python_task(config):
305
+ # This is a python class config
306
+ tasks_and_groups[config["task"]] = {
307
+ "type": "python_task",
308
+ "yaml_path": yaml_path,
309
+ }
310
+ elif self._config_is_group(config):
311
+ # This is a group config
312
+ tasks_and_groups[config["group"]] = {
313
+ "type": "group",
314
+ "task": -1, # This signals that
315
+ # we don't need to know
316
+ # the task list for indexing
317
+ # as it can be loaded
318
+ # when called.
319
+ "yaml_path": yaml_path,
320
+ }
321
+
322
+ # # Registered the level 1 tasks from a group config
323
+ # for config in config["task"]:
324
+ # if isinstance(config, dict) and self._config_is_task(config):
325
+ # task = config["task"]
326
+ # tasks_and_groups[task] = {
327
+ # "type": "task",
328
+ # "yaml_path": yaml_path,
329
+ # }
330
+
331
+ elif self._config_is_task(config):
332
+ # This is a task config
333
+ task = config["task"]
334
+ tasks_and_groups[task] = {
335
+ "type": "task",
336
+ "yaml_path": yaml_path,
337
+ }
338
+
339
+ if "group" in config:
340
+ groups = config["group"]
341
+ if isinstance(config["group"], str):
342
+ groups = [groups]
343
+
344
+ for group in groups:
345
+ if group not in tasks_and_groups:
346
+ tasks_and_groups[group] = {
347
+ "type": "group",
348
+ "task": [task],
349
+ "yaml_path": -1,
350
+ }
351
+ else:
352
+ tasks_and_groups[group]["task"].append(task)
353
+ else:
354
+ self.logger.debug(f"File {f} in {root} could not be loaded")
355
+
356
+ return tasks_and_groups
357
+
358
+
359
+ def get_task_name_from_config(task_config: Dict[str, str]) -> str:
360
+ if "task" in task_config:
361
+ return task_config["task"]
362
+ if "dataset_name" in task_config:
363
+ return "{dataset_path}_{dataset_name}".format(**task_config)
364
+ else:
365
+ return "{dataset_path}".format(**task_config)
366
+
367
+
368
+ def get_task_name_from_object(task_object):
369
+ if hasattr(task_object, "config"):
370
+ return task_object._config["task"]
371
+
372
+ # TODO: scrap this
373
+ # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting
374
+ return (
375
+ task_object.EVAL_HARNESS_NAME
376
+ if hasattr(task_object, "EVAL_HARNESS_NAME")
377
+ else type(task_object).__name__
378
+ )
379
+
380
+
381
+ def get_task_dict(
382
+ task_name_list: Union[str, List[Union[str, Dict, Task]]],
383
+ task_manager: Optional[TaskManager] = None,
384
+ ):
385
+ """Creates a dictionary of task objects from either a name of task, config, or prepared Task object.
386
+
387
+ :param task_name_list: List[Union[str, Dict, Task]]
388
+ List of task names, task config dicts, or instantiated Task objects to load
389
+ :param task_manager: TaskManager = None
390
+ A TaskManager object that stores indexed tasks. If not set,
391
+ task_manager will load one. This should be set by the user
392
+ if there are additional paths that want to be included
393
+ via `include_path`
394
+
395
+ :return
396
+ Dictionary of task objects
397
+ """
398
+ task_name_from_string_dict = {}
399
+ task_name_from_config_dict = {}
400
+ task_name_from_object_dict = {}
401
+
402
+ if isinstance(task_name_list, str):
403
+ task_name_list = [task_name_list]
404
+ elif isinstance(task_name_list, list):
405
+ if not all([isinstance(task, (str, dict, Task)) for task in task_name_list]):
406
+ raise TypeError(
407
+ "Expected all list items to be of types 'str', 'dict', or 'Task', but at least one entry did not match."
408
+ )
409
+ else:
410
+ raise TypeError(
411
+ f"Expected a 'str' or 'list' but received {type(task_name_list)}."
412
+ )
413
+
414
+ string_task_name_list = [task for task in task_name_list if isinstance(task, str)]
415
+ others_task_name_list = [task for task in task_name_list if not isinstance(task, str)]
416
+ if len(string_task_name_list) > 0:
417
+ if task_manager is None:
418
+ task_manager = TaskManager()
419
+
420
+ task_name_from_string_dict = task_manager.load_task_or_group(
421
+ string_task_name_list
422
+ )
423
+
424
+ for task_element in others_task_name_list:
425
+ if isinstance(task_element, dict):
426
+ task_name_from_config_dict = {
427
+ **task_name_from_config_dict,
428
+ **task_manager.load_config(config=task_element),
429
+ }
430
+
431
+ elif isinstance(task_element, Task):
432
+ task_name_from_object_dict = {
433
+ **task_name_from_object_dict,
434
+ get_task_name_from_object(task_element): task_element,
435
+ }
436
+
437
+ if not set(task_name_from_string_dict.keys()).isdisjoint(
438
+ set(task_name_from_object_dict.keys())
439
+ ):
440
+ raise ValueError
441
+
442
+ return {
443
+ **task_name_from_string_dict,
444
+ **task_name_from_config_dict,
445
+ **task_name_from_object_dict,
446
+ }
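Taken together, `TaskManager` indexes the bundled YAML configs and `get_task_dict` resolves task names, config dicts, or `Task` objects into instantiated task objects. A minimal sketch of that flow (the task names below are just examples drawn from the configs added in this commit):

```python
from lm_eval.tasks import TaskManager, get_task_dict

# Build the index over lm_eval/tasks/ (plus an optional extra directory).
task_manager = TaskManager(verbosity="INFO", include_path=None)
print(len(task_manager.all_tasks))  # number of registered task and group names

# Resolve a group name and a single task name into task objects; members of
# the group come back keyed by task name and tagged with their group name.
task_dict = get_task_dict(["anli", "arithmetic_2da"], task_manager)
print(sorted(task_dict.keys()))
```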
lm-evaluation/build/lib/lm_eval/tasks/anli/README.md ADDED
@@ -0,0 +1,56 @@
1
+ # ANLI
2
+
3
+ ### Paper
4
+
5
+ Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding`
6
+
7
+ Paper Link: https://arxiv.org/abs/1910.14599
8
+
9
+ Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial
10
+ human-and-model-in-the-loop procedure. It consists of three rounds that progressively
11
+ increase in difficulty and complexity, and each question-answer includes annotator-
12
+ provided explanations.
13
+
14
+ Homepage: https://github.com/facebookresearch/anli
15
+
16
+ ### Citation
17
+
18
+ ```
19
+ @inproceedings{nie-etal-2020-adversarial,
20
+ title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding",
21
+ author = "Nie, Yixin and
22
+ Williams, Adina and
23
+ Dinan, Emily and
24
+ Bansal, Mohit and
25
+ Weston, Jason and
26
+ Kiela, Douwe",
27
+ booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
28
+ year = "2020",
29
+ publisher = "Association for Computational Linguistics",
30
+ }
31
+ ```
32
+
33
+ ### Groups and Tasks
34
+
35
+ #### Groups
36
+
37
+ * `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3`
38
+
39
+ #### Tasks
40
+ * `anli_r1`: The data collected adversarially in the first round.
41
+ * `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data.
42
+ * `anli_r3`: The data collected adversarially in the third round, after training on the previous multiple rounds of data.
43
+
44
+
45
+ ### Checklist
46
+
47
+ For adding novel benchmarks/datasets to the library:
48
+ * [x] Is the task an existing benchmark in the literature?
49
+ * [x] Have you referenced the original paper that introduced the task?
50
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
51
+
52
+
53
+ If other tasks on this dataset are already supported:
54
+ * [ ] Is the "Main" variant of this task clearly denoted?
55
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
56
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r1.yaml ADDED
@@ -0,0 +1,26 @@
1
+ group:
2
+ - anli
3
+ task: anli_r1
4
+ dataset_path: anli
5
+ dataset_name: null
6
+ output_type: multiple_choice
7
+ training_split: train_r1
8
+ validation_split: dev_r1
9
+ test_split: test_r1
10
+ doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:"
11
+ # True = entailment
12
+ # False = contradiction
13
+ # Neither = neutral
14
+ doc_to_target: "{{['True', 'Neither', 'False'][label]}}"
15
+ doc_to_choice:
16
+ - "True"
17
+ - "Neither"
18
+ - "False"
19
+ should_decontaminate: true
20
+ doc_to_decontamination_query: premise
21
+ metric_list:
22
+ - metric: acc
23
+ aggregation: mean
24
+ higher_is_better: true
25
+ metadata:
26
+ version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r2.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: anli_r1.yaml
2
+ task: anli_r2
3
+ training_split: train_r2
4
+ validation_split: dev_r2
5
+ test_split: test_r2
lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r3.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: anli_r1.yaml
2
+ task: anli_r3
3
+ training_split: train_r3
4
+ validation_split: dev_r3
5
+ test_split: test_r3
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/README.md ADDED
@@ -0,0 +1,60 @@
1
+ # Arithmetic
2
+
3
+ ### Paper
4
+
5
+ Title: `Language Models are Few-Shot Learners`
6
+ Abstract: https://arxiv.org/abs/2005.14165
7
+
8
+ A small battery of 10 tests that involve asking language models a simple arithmetic
9
+ problem in natural language.
10
+
11
+ Homepage: https://github.com/openai/gpt-3/tree/master/data
12
+
13
+
14
+ ### Citation
15
+
16
+ ```
17
+ @inproceedings{NEURIPS2020_1457c0d6,
18
+ author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
19
+ booktitle = {Advances in Neural Information Processing Systems},
20
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
21
+ pages = {1877--1901},
22
+ publisher = {Curran Associates, Inc.},
23
+ title = {Language Models are Few-Shot Learners},
24
+ url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf},
25
+ volume = {33},
26
+ year = {2020}
27
+ }
28
+ ```
29
+
30
+ ### Groups and Tasks
31
+
32
+ #### Groups
33
+
34
+ * `arithmetic`: Evaluates `1dc` to `5ds`
35
+
36
+ #### Tasks
37
+
38
+ * `arithmetic_1dc`
39
+ * `arithmetic_2da`
40
+ * `arithmetic_2dm`
41
+ * `arithmetic_2ds`
42
+ * `arithmetic_3da`
43
+ * `arithmetic_3ds`
44
+ * `arithmetic_4da`
45
+ * `arithmetic_4ds`
46
+ * `arithmetic_5da`
47
+ * `arithmetic_5ds`
48
+
49
+ ### Checklist
50
+
51
+ For adding novel benchmarks/datasets to the library:
52
+ * [ ] Is the task an existing benchmark in the literature?
53
+ * [ ] Have you referenced the original paper that introduced the task?
54
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
55
+
56
+
57
+ If other tasks on this dataset are already supported:
58
+ * [ ] Is the "Main" variant of this task clearly denoted?
59
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
60
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml ADDED
@@ -0,0 +1,18 @@
1
+ group:
2
+ - arithmetic
3
+ task: arithmetic_1dc
4
+ dataset_path: EleutherAI/arithmetic
5
+ dataset_name: arithmetic_1dc
6
+ output_type: loglikelihood
7
+ validation_split: validation
8
+ test_split: null
9
+ doc_to_text: "{{context}}"
10
+ doc_to_target: "{{completion}}"
11
+ metric_list:
12
+ - metric: acc
13
+ aggregation: mean
14
+ higher_is_better: true
15
+ metadata:
16
+ version: 1.0
17
+ dataset_kwargs:
18
+ trust_remote_code: true
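Because `output_type` here is `loglikelihood`, the harness scores the target completion conditioned on the rendered context rather than generating text. A toy sketch with an invented row shaped like the fields the templates reference (`context`, `completion`); the exact wording of the real EleutherAI/arithmetic rows may differ:

```python
# Hypothetical document; only the field names come from the YAML above.
doc = {
    "context": "Question: What is 98 plus 45?\nAnswer:",
    "completion": " 143",
}

# doc_to_text / doc_to_target pass the two fields through unchanged.
prompt, target = doc["context"], doc["completion"]

# The model is asked for log P(target | prompt); `acc` counts the example as
# correct when the target is also the model's greedy continuation.
print(repr(prompt), "->", repr(target))
```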
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2da.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_2da
3
+ dataset_name: arithmetic_2da
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_2dm
3
+ dataset_name: arithmetic_2dm
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_2ds
3
+ dataset_name: arithmetic_2ds
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3da.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_3da
3
+ dataset_name: arithmetic_3da
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_3ds
3
+ dataset_name: arithmetic_3ds
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4da.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_4da
3
+ dataset_name: arithmetic_4da
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_4ds
3
+ dataset_name: arithmetic_4ds
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5da.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_5da
3
+ dataset_name: arithmetic_5da
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml ADDED
@@ -0,0 +1,5 @@
1
+ include: arithmetic_1dc.yaml
2
+ task: arithmetic_5ds
3
+ dataset_name: arithmetic_5ds
4
+ dataset_kwargs:
5
+ trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/bbh/README.md ADDED
@@ -0,0 +1,49 @@
1
+ # BigBenchHard
2
+
3
+ ## Paper
4
+ Title: `Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them`
5
+ Abstract: https://arxiv.org/abs/2210.09261
6
+
7
+ A suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH).
8
+ These are the tasks for which prior language model evaluations did not outperform
9
+ the average human-rater.
10
+
11
+ Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard
12
+
13
+
14
+ ## Citation
15
+ ```
16
+ @article{suzgun2022challenging,
17
+ title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them},
18
+ author={Suzgun, Mirac and Scales, Nathan and Sch{\"a}rli, Nathanael and Gehrmann, Sebastian and Tay, Yi and Chung, Hyung Won and Chowdhery, Aakanksha and Le, Quoc V and Chi, Ed H and Zhou, Denny and and Wei, Jason},
19
+ journal={arXiv preprint arXiv:2210.09261},
20
+ year={2022}
21
+ }
22
+ ```
23
+
24
+ ### Groups and Tasks
25
+
26
+ #### Groups
27
+
28
+ - `bbh_zeroshot`
29
+ - `bbh_fewshot`
30
+ - `bbh_cot_fewshot`
31
+ - `bbh_cot_zeroshot`
32
+
33
+
34
+ #### Tasks
35
+
36
+ - ...
37
+
38
+ ### Checklist
39
+
40
+ - [x] Is in Eval-harness v1.0 ?
41
+ - [ ] Has been checked for regression from v1.0?
42
+ - [ ] Has been checked for equivalence with original paper methodology?
43
+ - [ ] "Main" checked variant clearly denoted?
44
+
45
+ ### Variant Wishlist
46
+
47
+ - [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
48
+ - [ ] Using Verifiers
49
+ - [ ] Majority voting "without CoT"
lm-evaluation/build/lib/lm_eval/tasks/bbh/_generate_configs.py ADDED
@@ -0,0 +1,80 @@
1
+ """
2
+ Take in a YAML, and output all other splits with this YAML
3
+ """
4
+ import argparse
5
+ import os
6
+ import re
7
+
8
+ import datasets
9
+ import requests
10
+ import yaml
11
+ from tqdm import tqdm
12
+
13
+ from lm_eval import utils
14
+
15
+
16
+ def parse_args():
17
+ parser = argparse.ArgumentParser()
18
+ parser.add_argument("--base_yaml_path", required=True)
19
+ parser.add_argument("--save_prefix_path", default="zeroshot")
20
+ parser.add_argument("--cot", default=False)
21
+ parser.add_argument("--fewshot", default=False)
22
+ parser.add_argument("--task_prefix", default="")
23
+ return parser.parse_args()
24
+
25
+
26
+ if __name__ == "__main__":
27
+ args = parse_args()
28
+
29
+ # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
30
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1]
31
+ with open(args.base_yaml_path, encoding="utf-8") as f:
32
+ base_yaml = yaml.full_load(f)
33
+
34
+ base_doc_to_text = "Q: {{input}}\nA:"
35
+ answer_regex = re.compile("(?<=answer is )(.*)(?=.)")
36
+
37
+ dataset_path = "lukaemon/bbh"
38
+ for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
39
+ resp = requests.get(
40
+ f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt"
41
+ ).content.decode("utf-8")
42
+ prompt = resp.split("\n-----\n")[-1]
43
+ description, *few_shot = prompt.split("\n\n")
44
+
45
+ prefix_doc_to_text = ""
46
+ if args.fewshot:
47
+ if args.cot:
48
+ prefix_doc_to_text = "\n\n".join(few_shot) + "\n\n"
49
+ else:
50
+ for shot in few_shot:
51
+ try:
52
+ answer = answer_regex.search(shot)[0]
53
+ except Exception:
54
+ print("task", task)
55
+ print(shot)
56
+ example = shot.split("Let's think step by step.")[0]
57
+ prefix_doc_to_text += f"{example}{answer}\n\n"
58
+
59
+ doc_to_text = prefix_doc_to_text + base_doc_to_text
60
+ if args.cot:
61
+ doc_to_text = doc_to_text + " Let's think step by step.\n"
62
+
63
+ yaml_dict = {
64
+ "include": base_yaml_name,
65
+ "task": f"bbh_{args.task_prefix}_{task}",
66
+ "dataset_name": task,
67
+ "description": description + "\n\n",
68
+ "doc_to_text": doc_to_text,
69
+ }
70
+
71
+ file_save_path = args.save_prefix_path + f"/{task}.yaml"
72
+ utils.eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}")
73
+ with open(file_save_path, "w", encoding="utf-8") as yaml_file:
74
+ yaml.dump(
75
+ yaml_dict,
76
+ yaml_file,
77
+ width=float("inf"),
78
+ allow_unicode=True,
79
+ default_style='"',
80
+ )
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml ADDED
@@ -0,0 +1,18 @@
1
+ "dataset_name": "boolean_expressions"
2
+ "description": "Evaluate the result of a random Boolean expression.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_boolean_expressions"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: "regex"
11
+ group_select: -1
12
+ regex_pattern: "\\b(True|False)\\b"
13
+ - function: "take_first"
14
+ - name: "strict-match"
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
18
+ - function: "take_first"
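The two filter pipelines above differ only in how they pull a final answer out of the model's chain-of-thought text: `flexible-extract` keeps the last `True`/`False` token found anywhere, while `strict-match` only accepts text that follows an explicit "the answer is ..." template. A small standalone demonstration of the two regexes on an invented model output (the harness applies them via its filter classes, not this exact code):

```python
import re

output = ("Let's think step by step. not ( True ) and ( True ) is False. "
          "So the answer is False.")

# flexible-extract: group_select -1 means "take the last match".
flexible_matches = re.findall(r"\b(True|False)\b", output)
print(flexible_matches[-1])  # -> False

# strict-match: simplified here to the lowercase alternative of the pattern above.
strict = re.search(r"(?<=the answer is )(.*)(?=.)", output)
print(strict.group(1) if strict else "[no match]")  # -> False
```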
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml ADDED
@@ -0,0 +1,18 @@
1
+ "dataset_name": "causal_judgement"
2
+ "description": "Answer questions about causal attribution.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_causal_judgement"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: "regex"
11
+ group_select: -1
12
+ regex_pattern: "\\b(Yes|No|yes|no)\\b"
13
+ - function: "take_first"
14
+ - name: "strict-match"
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
18
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml ADDED
@@ -0,0 +1,20 @@
1
+ "dataset_name": "date_understanding"
2
+ "description": "Infer the date from context.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_date_understanding"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.MultiChoiceRegexFilter
11
+ group_select: -1
12
+ ignore_case: true
13
+ ignore_punctuation: true
14
+ regex_pattern: "(\\([A-Z]\\))"
15
+ - function: "take_first"
16
+ - name: "strict-match"
17
+ filter:
18
+ - function: "regex"
19
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
20
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml ADDED
@@ -0,0 +1,18 @@
1
+ "dataset_name": "formal_fallacies"
2
+ "description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_formal_fallacies"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: "regex"
11
+ group_select: -1
12
+ regex_pattern: "\\b(valid|invalid)\\b"
13
+ - function: "take_first"
14
+ - name: "strict-match"
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
18
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,20 @@
1
+ "dataset_name": "geometric_shapes"
2
+ "description": "Name geometric shapes from their SVG paths.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_geometric_shapes"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.MultiChoiceRegexFilter
11
+ group_select: -1
12
+ ignore_case: true
13
+ ignore_punctuation: true
14
+ regex_pattern: "(\\([A-Z]\\))"
15
+ - function: "take_first"
16
+ - name: "strict-match"
17
+ filter:
18
+ - function: "regex"
19
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
20
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml ADDED
@@ -0,0 +1,20 @@
1
+ "dataset_name": "hyperbaton"
2
+ "description": "Order adjectives correctly in English sentences.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_hyperbaton"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.MultiChoiceRegexFilter
11
+ group_select: -1
12
+ ignore_case: true
13
+ ignore_punctuation: true
14
+ regex_pattern: "(\\([A-Z]\\))"
15
+ - function: "take_first"
16
+ - name: "strict-match"
17
+ filter:
18
+ - function: "regex"
19
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
20
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,19 @@
1
+ "dataset_name": "logical_deduction_five_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_five_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml ADDED
@@ -0,0 +1,19 @@
1
+ "dataset_name": "logical_deduction_seven_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_seven_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,19 @@
1
+ "dataset_name": "logical_deduction_three_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_three_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml ADDED
@@ -0,0 +1,19 @@
1
+ "dataset_name": "movie_recommendation"
2
+ "description": "Recommend movies similar to the given list of movies.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_movie_recommendation"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml ADDED
@@ -0,0 +1,18 @@
1
+ "dataset_name": "multistep_arithmetic_two"
2
+ "description": "Solve multi-step arithmetic problems.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_multistep_arithmetic_two"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.NumberParseRegexFilter
11
+ group_select: -1
12
+ regex_pattern: "([-0-9]+)"
13
+ - function: "take_first"
14
+ - name: "strict-match"
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
18
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml ADDED
@@ -0,0 +1,17 @@
1
+ "dataset_name": "navigate"
2
+ "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_navigate"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: "regex"
10
+ group_select: -1
11
+ regex_pattern: "\\b(Yes|No|yes|no)\\b"
12
+ - function: "take_first"
13
+ - name: "strict-match"
14
+ filter:
15
+ - function: "regex"
16
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
17
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,19 @@
 
1
+ "dataset_name": "penguins_in_a_table"
2
+ "description": "Answer questions about a table of penguins and their attributes.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_penguins_in_a_table"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "reasoning_about_colored_objects"
+ "description": "Answer extremely simple questions about the colors of objects on a surface.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_reasoning_about_colored_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "salient_translation_error_detection"
+ "description": "Detect the type of error in an English translation of a German source sentence.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_salient_translation_error_detection"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml ADDED
@@ -0,0 +1,21 @@
+ "dataset_name": "sports_understanding"
+ "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_sports_understanding"
+
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MapRegexFilter
+         group_select: -1
+         ignore_case: true
+         regex_pattern_to_value:
+           \b(no|not plausible)\b: "no"
+           \b(yes|plausible)\b: "yes"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "temporal_sequences"
+ "description": "Task description: Answer questions about which times certain events could have occurred.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_temporal_sequences"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "tracking_shuffled_objects_five_objects"
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_five_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "tracking_shuffled_objects_seven_objects"
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_seven_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml ADDED
@@ -0,0 +1,19 @@
+ "dataset_name": "tracking_shuffled_objects_three_objects"
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_three_objects"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml ADDED
@@ -0,0 +1,20 @@
+ "dataset_name": "web_of_lies"
+ "description": "Evaluate a random boolean function expressed as a word problem.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_web_of_lies"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MapRegexFilter
+         group_select: -1
+         ignore_case: true
+         regex_pattern_to_value:
+           \b(no|does not tell the truth|is not telling the truth)\b: "no"
+           \b(yes|tells the truth|is telling the truth)\b: "yes"
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml ADDED
@@ -0,0 +1,15 @@
+ "dataset_name": "word_sorting"
+ "description": "Sort a list of words.\n\n"
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
+ "include": "_cot_zeroshot_template_yaml"
+ "task": "bbh_cot_zeroshot_word_sorting"
+ filter_list:
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.WordSortFilter
+       - function: "take_first"
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/coqa/README.md ADDED
@@ -0,0 +1,43 @@
+ # CoQA
+
+ ### Paper
+
+ Title: `CoQA: A Conversational Question Answering Challenge`
+
+ Abstract: https://arxiv.org/pdf/1808.07042.pdf
+
+ CoQA is a large-scale dataset for building Conversational Question Answering
+ systems. The goal of the CoQA challenge is to measure the ability of machines to
+ understand a text passage and answer a series of interconnected questions that
+ appear in a conversation.
+
+ Homepage: https://stanfordnlp.github.io/coqa/
+
+ ### Citation
+
+ ```
+ BibTeX-formatted citation goes here
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * Not part of a group yet
+
+ #### Tasks
+
+ * `coqa`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/coqa/default.yaml ADDED
@@ -0,0 +1,24 @@
+ task: coqa
+ dataset_path: EleutherAI/coqa
+ output_type: generate_until
+ training_split: train
+ validation_split: validation
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: !function utils.doc_to_target
+ process_results: !function utils.process_results
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{story}} {{question.input_text|join('\n')}}"
+ generation_kwargs:
+   until:
+     - "\nQ:"
+ metric_list:
+   - metric: em
+     aggregation: mean
+     higher_is_better: true
+   - metric: f1
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 3.0
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/coqa/utils.py ADDED
@@ -0,0 +1,77 @@
+ from itertools import zip_longest
+
+ import transformers.data.metrics.squad_metrics as squad_metrics
+
+
+ def doc_to_text(doc):
+     # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1}
+     # and a question qi, the task is to predict the answer ai
+     doc_text = doc["story"] + "\n\n"
+     for q, a in zip_longest(
+         doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]
+     ):  # omit target answer ai
+         question = f"Q: {q}\n\n"
+         answer = f"A: {a}\n\n" if a is not None else "A:"
+         doc_text += question + answer
+     return doc_text
+
+
+ def doc_to_target(doc):
+     turn_id = len(doc["questions"]["input_text"])
+     # Returns unique answers and valid alternatives (some questions in CoQA have multiple valid answers).
+     answers = []
+     answer_forturn = doc["answers"]["input_text"][turn_id - 1]
+     answers.append(answer_forturn)
+
+     additional_answers = doc.get("additional_answers")
+     if additional_answers:
+         for key in additional_answers:
+             additional_answer_for_turn = additional_answers[key]["input_text"][
+                 turn_id - 1
+             ]
+             if additional_answer_for_turn.lower() not in map(str.lower, answers):
+                 answers.append(additional_answer_for_turn)
+     return answers
+
+
+ def em(gold_list, pred):
+     # tests for exact match on the normalised answer (compute_exact)
+     em_sum = 0.0
+     if len(gold_list) > 1:
+         for i in range(len(gold_list)):
+             gold_answers = gold_list[0:i] + gold_list[i + 1 :]
+             # the prediction is compared against (n) golds and the maximum is taken
+             em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
+     else:
+         em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
+
+     return em_sum / max(1, len(gold_list))
+
+
+ def compute_scores(gold_list, pred):
+     # tests for exact match on the normalised answer (compute_exact)
+     # and for token overlap (compute_f1)
+     f1_sum = 0.0
+     em_sum = 0.0
+     if len(gold_list) > 1:
+         for i in range(len(gold_list)):
+             gold_answers = gold_list[0:i] + gold_list[i + 1 :]
+             # the prediction is compared against (n) golds and the maximum is taken
+             em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
+             f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
+     else:
+         em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
+         f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)
+
+     return {
+         "em": em_sum / max(1, len(gold_list)),
+         "f1": f1_sum / max(1, len(gold_list)),
+     }
+
+
+ def process_results(doc, results):
+     gold_list = doc_to_target(doc)
+     pred = results[0].strip().split("\n")[0]
+
+     scores = compute_scores(gold_list, pred)
+     return scores
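For orientation, the `em`/`f1` arithmetic above ultimately defers to the SQuAD metrics in `transformers`, which normalise case, punctuation and articles before comparing strings. The toy example below (made-up gold answer and prediction) mirrors the single-gold branch of `compute_scores`:

```python
import transformers.data.metrics.squad_metrics as squad_metrics

gold_list = ["a barn"]
pred = "A barn."

# Normalisation strips case, punctuation and articles, so both strings reduce to "barn".
em = max(squad_metrics.compute_exact(a, pred) for a in gold_list)  # 1
f1 = max(squad_metrics.compute_f1(a, pred) for a in gold_list)     # 1.0
print(em, f1)
```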
lm-evaluation/build/lib/lm_eval/tasks/eq_bench/README.md ADDED
@@ -0,0 +1,55 @@
+ # EQ-Bench
+
+ Title: `EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models`
+
+ Abstract: https://arxiv.org/abs/2312.06281
+
+ EQ-Bench is a benchmark for language models designed to assess emotional intelligence.
+
+ Why emotional intelligence? One reason is that it represents a subset of abilities that are important for the user experience, and which isn't explicitly tested by other benchmarks. Another reason is that it's not trivial to improve scores by fine-tuning for the benchmark, which makes it harder to "game" the leaderboard.
+
+ EQ-Bench is a little different from traditional psychometric tests. It uses a specific question format, in which the subject has to read a dialogue then rate the intensity of possible emotional responses of one of the characters. Every question is interpretative and assesses the ability to predict the magnitude of the 4 presented emotions. The test is graded without the need for a judge (so there is no length bias). It's cheap to run (only 171 questions), and produces results that correlate strongly with human preference (Arena ELO) and multi-domain benchmarks like MMLU.
+
+ Homepage: https://eqbench.com/
+
+
+ NOTE: There are some key differences between the lm-evaluation-harness version and the implementation described in the EQ-Bench paper (these have been OK'd by the author):
+
+ - The lm-eval version uses the EQ-Bench v2 test set (171 questions) and score calculation. It does not incorporate the revision part of the prompt, as per v2.1 (https://github.com/EQ-bench/EQ-Bench)
+ - No retries in the lm-eval version (the EQ-Bench pipeline retries with successively higher temps if it encounters unparseable answers)
+ - In the original implementation, unparseable answers are excluded from the final score, and 83% of answers have to be parseable or a fail is returned. The lm-eval version instead assigns 0 to unparseable answers and has no fail criteria. So for lower-performing models, there may be differences with the EQ-Bench leaderboard.
+
+
+ ### Citation
+
+ ```bibtex
+ @misc{paech2023eqbench,
+   title={EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models},
+   author={Samuel J. Paech},
+   year={2023},
+   eprint={2312.06281},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * Not part of a group yet
+
+ #### Tasks
+
+ * `eq_bench`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
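The README's note about unparseable answers scoring 0 is the main behavioural difference from the reference pipeline. The sketch below is a hypothetical illustration of that kind of parsing step, not the harness's actual parser: it reads "Emotion: rating" pairs from a response and falls back to 0 for anything it cannot read.

```python
import re

def parse_ratings(response: str, emotions: list[str]) -> dict[str, float]:
    # Hypothetical parser: any emotion whose rating can't be read keeps a score of 0.
    ratings = {e: 0.0 for e in emotions}
    for emotion in emotions:
        m = re.search(rf"{emotion}\s*:\s*(\d+(?:\.\d+)?)", response, flags=re.IGNORECASE)
        if m:
            ratings[emotion] = float(m.group(1))
    return ratings

print(parse_ratings("Surprise: 7\nJoy: 0\nAnger: 1\nRelief: 3",
                    ["Surprise", "Joy", "Anger", "Relief"]))
```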