applied-ai-018 committed
Commit c80136c · verified · 1 Parent(s): 3a2623c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
Files changed (50)
  1. lm-evaluation/lm_eval/__main__.py +417 -0
  2. lm-evaluation/lm_eval/logging_utils.py +455 -0
  3. lm-evaluation/lm_eval/tasks/__init__.py +446 -0
  4. lm-evaluation/lm_eval/tasks/babi/README.md +45 -0
  5. lm-evaluation/lm_eval/tasks/babi/babi.yaml +20 -0
  6. lm-evaluation/lm_eval/tasks/blimp/README.md +52 -0
  7. lm-evaluation/lm_eval/tasks/blimp/_template_yaml +14 -0
  8. lm-evaluation/lm_eval/tasks/blimp/adjunct_island.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/blimp/anaphor_number_agreement.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/blimp/animate_subject_passive.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/blimp/causative.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/blimp/drop_argument.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/blimp/existential_there_object_raising.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/blimp/expletive_it_object_raising.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/blimp/generate_configs.py +94 -0
  30. lm-evaluation/lm_eval/tasks/blimp/inchoative.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/blimp/intransitive.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/blimp/npi_present_1.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/blimp/npi_present_2.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/blimp/only_npi_scope.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/blimp/passive_1.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/blimp/passive_2.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/blimp/principle_A_c_command.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/blimp/principle_A_case_2.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_1.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_3.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/blimp/principle_A_reconstruction.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_2.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_scope.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/blimp/sentential_subject_island.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml +4 -0
lm-evaluation/lm_eval/__main__.py ADDED
@@ -0,0 +1,417 @@
1
+ import argparse
2
+ import json
3
+ import logging
4
+ import os
5
+ import re
6
+ import sys
7
+ from functools import partial
8
+ from pathlib import Path
9
+ from typing import Union
10
+
11
+ import numpy as np
12
+
13
+ from lm_eval import evaluator, utils
14
+ from lm_eval.evaluator import request_caching_arg_to_dict
15
+ from lm_eval.logging_utils import WandbLogger
16
+ from lm_eval.tasks import TaskManager
17
+ from lm_eval.utils import make_table, simple_parse_args_string
18
+
19
+
20
+ DEFAULT_RESULTS_FILE = "results.json"
21
+
22
+
23
+ def _handle_non_serializable(o):
24
+ if isinstance(o, np.int64) or isinstance(o, np.int32):
25
+ return int(o)
26
+ elif isinstance(o, set):
27
+ return list(o)
28
+ else:
29
+ return str(o)
30
+
31
+
32
+ def _int_or_none_list_arg_type(max_len: int, value: str, split_char: str = ","):
33
+ def parse_value(item):
34
+ item = item.strip().lower()
35
+ if item == "none":
36
+ return None
37
+ try:
38
+ return int(item)
39
+ except ValueError:
40
+ raise argparse.ArgumentTypeError(f"{item} is not an integer or None")
41
+
42
+ items = [parse_value(v) for v in value.split(split_char)]
43
+ num_items = len(items)
44
+
45
+ if num_items == 1:
46
+ # Makes downstream handling the same for single and multiple values
47
+ items = items * max_len
48
+ elif num_items != max_len:
49
+ raise argparse.ArgumentTypeError(
50
+ f"Argument requires {max_len} integers or None, separated by '{split_char}'"
51
+ )
52
+
53
+ return items
54
+
55
+
56
+ def check_argument_types(parser: argparse.ArgumentParser):
57
+ """
58
+ Check to make sure all CLI args are typed, raises error if not
59
+ """
60
+ for action in parser._actions:
61
+ if action.dest != "help" and not action.const:
62
+ if action.type is None:
63
+ raise ValueError(
64
+ f"Argument '{action.dest}' doesn't have a type specified."
65
+ )
66
+ else:
67
+ continue
68
+
69
+
70
+ def setup_parser() -> argparse.ArgumentParser:
71
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
72
+ parser.add_argument(
73
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
74
+ )
75
+ parser.add_argument(
76
+ "--tasks",
77
+ "-t",
78
+ default=None,
79
+ type=str,
80
+ metavar="task1,task2",
81
+ help="To get full list of tasks, use the command lm-eval --tasks list",
82
+ )
83
+ parser.add_argument(
84
+ "--model_args",
85
+ "-a",
86
+ default="",
87
+ type=str,
88
+ help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
89
+ )
90
+ parser.add_argument(
91
+ "--num_fewshot",
92
+ "-f",
93
+ type=int,
94
+ default=None,
95
+ metavar="N",
96
+ help="Number of examples in few-shot context",
97
+ )
98
+ parser.add_argument(
99
+ "--batch_size",
100
+ "-b",
101
+ type=str,
102
+ default=1,
103
+ metavar="auto|auto:N|N",
104
+ help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
105
+ )
106
+ parser.add_argument(
107
+ "--max_batch_size",
108
+ type=int,
109
+ default=None,
110
+ metavar="N",
111
+ help="Maximal batch size to try with --batch_size auto.",
112
+ )
113
+ parser.add_argument(
114
+ "--device",
115
+ type=str,
116
+ default=None,
117
+ help="Device to use (e.g. cuda, cuda:0, cpu).",
118
+ )
119
+ parser.add_argument(
120
+ "--output_path",
121
+ "-o",
122
+ default=None,
123
+ type=str,
124
+ metavar="DIR|DIR/file.json",
125
+ help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
126
+ )
127
+ parser.add_argument(
128
+ "--limit",
129
+ "-L",
130
+ type=float,
131
+ default=None,
132
+ metavar="N|0<N<1",
133
+ help="Limit the number of examples per task. "
134
+ "If <1, limit is a percentage of the total number of examples.",
135
+ )
136
+ parser.add_argument(
137
+ "--use_cache",
138
+ "-c",
139
+ type=str,
140
+ default=None,
141
+ metavar="DIR",
142
+ help="A path to a sqlite db file for caching model responses. `None` if not caching.",
143
+ )
144
+ parser.add_argument(
145
+ "--cache_requests",
146
+ type=str,
147
+ default=None,
148
+ choices=["true", "refresh", "delete"],
149
+ help="Speed up evaluation by caching the building of dataset requests. `None` if not caching.",
150
+ )
151
+ parser.add_argument(
152
+ "--check_integrity",
153
+ action="store_true",
154
+ help="Whether to run the relevant part of the test suite for the tasks.",
155
+ )
156
+ parser.add_argument(
157
+ "--write_out",
158
+ "-w",
159
+ action="store_true",
160
+ default=False,
161
+ help="Prints the prompt for the first few documents.",
162
+ )
163
+ parser.add_argument(
164
+ "--log_samples",
165
+ "-s",
166
+ action="store_true",
167
+ default=False,
168
+ help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.",
169
+ )
170
+ parser.add_argument(
171
+ "--show_config",
172
+ action="store_true",
173
+ default=False,
174
+ help="If True, shows the the full config of all tasks at the end of the evaluation.",
175
+ )
176
+ parser.add_argument(
177
+ "--include_path",
178
+ type=str,
179
+ default=None,
180
+ metavar="DIR",
181
+ help="Additional path to include if there are external tasks to include.",
182
+ )
183
+ parser.add_argument(
184
+ "--gen_kwargs",
185
+ type=str,
186
+ default=None,
187
+ help=(
188
+ "String arguments for model generation on greedy_until tasks,"
189
+ " e.g. `temperature=0,top_k=0,top_p=0`."
190
+ ),
191
+ )
192
+ parser.add_argument(
193
+ "--verbosity",
194
+ "-v",
195
+ type=str.upper,
196
+ default="INFO",
197
+ metavar="CRITICAL|ERROR|WARNING|INFO|DEBUG",
198
+ help="Controls the reported logging error level. Set to DEBUG when testing + adding new task configurations for comprehensive log output.",
199
+ )
200
+ parser.add_argument(
201
+ "--wandb_args",
202
+ type=str,
203
+ default="",
204
+ help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval",
205
+ )
206
+ parser.add_argument(
207
+ "--predict_only",
208
+ "-x",
209
+ action="store_true",
210
+ default=False,
211
+ help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
212
+ )
213
+ parser.add_argument(
214
+ "--seed",
215
+ type=partial(_int_or_none_list_arg_type, 3),
216
+ default="0,1234,1234", # for backward compatibility
217
+ help=(
218
+ "Set seed for python's random, numpy and torch.\n"
219
+ "Accepts a comma-separated list of 3 values for python's random, numpy, and torch seeds, respectively, "
220
+ "or a single integer to set the same seed for all three.\n"
221
+ "The values are either an integer or 'None' to not set the seed. Default is `0,1234,1234` (for backward compatibility).\n"
222
+ "E.g. `--seed 0,None,8` sets `random.seed(0)` and `torch.manual_seed(8)`. Here numpy's seed is not set since the second value is `None`.\n"
223
+ "E.g, `--seed 42` sets all three seeds to 42."
224
+ ),
225
+ )
226
+ parser.add_argument(
227
+ "--trust_remote_code",
228
+ action="store_true",
229
+ help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
230
+ )
231
+
232
+ return parser
233
+
234
+
235
+ def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
236
+ check_argument_types(parser)
237
+ return parser.parse_args()
238
+
239
+
240
+ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
241
+ if not args:
242
+ # we allow for args to be passed externally, else we parse them ourselves
243
+ parser = setup_parser()
244
+ args = parse_eval_args(parser)
245
+
246
+ if args.wandb_args:
247
+ wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))
248
+
249
+ eval_logger = utils.eval_logger
250
+ eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
251
+ eval_logger.info(f"Verbosity set to {args.verbosity}")
252
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
253
+
254
+ if args.predict_only:
255
+ args.log_samples = True
256
+ if (args.log_samples or args.predict_only) and not args.output_path:
257
+ raise ValueError(
258
+ "Specify --output_path if providing --log_samples or --predict_only"
259
+ )
260
+
261
+ if args.include_path is not None:
262
+ eval_logger.info(f"Including path: {args.include_path}")
263
+ task_manager = TaskManager(args.verbosity, include_path=args.include_path)
264
+
265
+ if args.limit:
266
+ eval_logger.warning(
267
+ " --limit SHOULD ONLY BE USED FOR TESTING."
268
+ "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
269
+ )
270
+
271
+ if args.tasks is None:
272
+ eval_logger.error("Need to specify task to evaluate.")
273
+ sys.exit()
274
+ elif args.tasks == "list":
275
+ eval_logger.info(
276
+ "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks))
277
+ )
278
+ sys.exit()
279
+ else:
280
+ if os.path.isdir(args.tasks):
281
+ import glob
282
+
283
+ task_names = []
284
+ yaml_path = os.path.join(args.tasks, "*.yaml")
285
+ for yaml_file in glob.glob(yaml_path):
286
+ config = utils.load_yaml_config(yaml_file)
287
+ task_names.append(config)
288
+ else:
289
+ task_list = args.tasks.split(",")
290
+ task_names = task_manager.match_tasks(task_list)
291
+ for task in [task for task in task_list if task not in task_names]:
292
+ if os.path.isfile(task):
293
+ config = utils.load_yaml_config(task)
294
+ task_names.append(config)
295
+ task_missing = [
296
+ task for task in task_list if task not in task_names and "*" not in task
297
+ ] # we don't want errors if a wildcard ("*") task name was used
298
+
299
+ if task_missing:
300
+ missing = ", ".join(task_missing)
301
+ eval_logger.error(
302
+ f"Tasks were not found: {missing}\n"
303
+ f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
304
+ )
305
+ raise ValueError(
306
+ f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues."
307
+ )
308
+
309
+ if args.output_path:
310
+ path = Path(args.output_path)
311
+ # check if file or 'dir/results.json' exists
312
+ if path.is_file():
313
+ raise FileExistsError(f"File already exists at {path}")
314
+ output_path_file = path.joinpath(DEFAULT_RESULTS_FILE)
315
+ if output_path_file.is_file():
316
+ eval_logger.warning(
317
+ f"File {output_path_file} already exists. Results will be overwritten."
318
+ )
319
+ # if path json then get parent dir
320
+ elif path.suffix in (".json", ".jsonl"):
321
+ output_path_file = path
322
+ path.parent.mkdir(parents=True, exist_ok=True)
323
+ path = path.parent
324
+ else:
325
+ path.mkdir(parents=True, exist_ok=True)
326
+
327
+ # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
328
+ if args.trust_remote_code:
329
+ os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = str(args.trust_remote_code)
330
+ args.model_args = (
331
+ args.model_args
332
+ + f",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}"
333
+ )
334
+
335
+ eval_logger.info(f"Selected Tasks: {task_names}")
336
+
337
+ request_caching_args = request_caching_arg_to_dict(
338
+ cache_requests=args.cache_requests
339
+ )
340
+
341
+ results = evaluator.simple_evaluate(
342
+ model=args.model,
343
+ model_args=args.model_args,
344
+ tasks=task_names,
345
+ num_fewshot=args.num_fewshot,
346
+ batch_size=args.batch_size,
347
+ max_batch_size=args.max_batch_size,
348
+ device=args.device,
349
+ use_cache=args.use_cache,
350
+ limit=args.limit,
351
+ check_integrity=args.check_integrity,
352
+ write_out=args.write_out,
353
+ log_samples=args.log_samples,
354
+ gen_kwargs=args.gen_kwargs,
355
+ task_manager=task_manager,
356
+ verbosity=args.verbosity,
357
+ predict_only=args.predict_only,
358
+ random_seed=args.seed[0],
359
+ numpy_random_seed=args.seed[1],
360
+ torch_random_seed=args.seed[2],
361
+ **request_caching_args,
362
+ )
363
+
364
+ if results is not None:
365
+ if args.log_samples:
366
+ samples = results.pop("samples")
367
+ dumped = json.dumps(
368
+ results, indent=2, default=_handle_non_serializable, ensure_ascii=False
369
+ )
370
+ if args.show_config:
371
+ print(dumped)
372
+
373
+ batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))
374
+
375
+ # Add W&B logging
376
+ if args.wandb_args:
377
+ try:
378
+ wandb_logger.post_init(results)
379
+ wandb_logger.log_eval_result()
380
+ if args.log_samples:
381
+ wandb_logger.log_eval_samples(samples)
382
+ except Exception as e:
383
+ eval_logger.info(f"Logging to Weights and Biases failed due to {e}")
384
+
385
+ if args.output_path:
386
+ output_path_file.open("w", encoding="utf-8").write(dumped)
387
+
388
+ if args.log_samples:
389
+ for task_name, config in results["configs"].items():
390
+ output_name = "{}_{}".format(
391
+ re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", args.model_args),
392
+ task_name,
393
+ )
394
+ filename = path.joinpath(f"{output_name}.jsonl")
395
+ samples_dumped = json.dumps(
396
+ samples[task_name],
397
+ indent=2,
398
+ default=_handle_non_serializable,
399
+ ensure_ascii=False,
400
+ )
401
+ filename.write_text(samples_dumped, encoding="utf-8")
402
+
403
+ print(
404
+ f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
405
+ f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
406
+ )
407
+ print(make_table(results))
408
+ if "groups" in results:
409
+ print(make_table(results, "groups"))
410
+
411
+ if args.wandb_args:
412
+ # Tear down wandb run once all the logging is done.
413
+ wandb_logger.run.finish()
414
+
415
+
416
+ if __name__ == "__main__":
417
+ cli_evaluate()
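
Because `cli_evaluate` accepts an externally constructed `argparse.Namespace`, the entry point can also be driven from Python rather than the shell. A minimal sketch under that assumption (the model, task, and output values below are illustrative, not part of this commit):

```python
# Sketch: drive the lm-eval CLI entry point from Python.
# Model/task/output values are placeholders for illustration only.
from lm_eval.__main__ import cli_evaluate, setup_parser

parser = setup_parser()
args = parser.parse_args(
    [
        "--model", "hf",
        "--model_args", "pretrained=EleutherAI/pythia-160m",
        "--tasks", "blimp_adjunct_island",
        "--batch_size", "8",
        "--output_path", "results/",
    ]
)
cli_evaluate(args)  # same code path as `python -m lm_eval ...`
```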
lm-evaluation/lm_eval/logging_utils.py ADDED
@@ -0,0 +1,455 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import os
5
+ import re
6
+ import subprocess
7
+ from pathlib import Path
8
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
9
+
10
+ import numpy as np
11
+ import pandas as pd
12
+ from packaging.version import Version
13
+ from torch.utils.collect_env import get_pretty_env_info
14
+ from transformers import __version__ as trans_version
15
+
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ def remove_none_pattern(input_string: str) -> Tuple[str, bool]:
21
+ """Remove the ',none' substring from the input_string if it exists at the end.
22
+
23
+ Args:
24
+ input_string (str): The input string from which to remove the ',none' substring.
25
+
26
+ Returns:
27
+ Tuple[str, bool]: A tuple containing the modified input_string with the ',none' substring removed
28
+ and a boolean indicating whether the modification was made (True) or not (False).
29
+ """
30
+ # Define the pattern to match ',none' at the end of the string
31
+ pattern = re.compile(r",none$")
32
+
33
+ # Use sub() to replace ',none' with an empty string
34
+ result = re.sub(pattern, "", input_string)
35
+
36
+ # check if the input_string changed
37
+ removed = result != input_string
38
+
39
+ return result, removed
40
+
41
+
42
+ def _handle_non_serializable(o: Any) -> Union[int, str, list]:
43
+ """Handle non-serializable objects by converting them to serializable types.
44
+
45
+ Args:
46
+ o (Any): The object to be handled.
47
+
48
+ Returns:
49
+ Union[int, str, list]: The converted object. If the object is of type np.int64 or np.int32,
50
+ it will be converted to int. If the object is of type set, it will be converted
51
+ to a list. Otherwise, it will be converted to str.
52
+ """
53
+ if isinstance(o, np.int64) or isinstance(o, np.int32):
54
+ return int(o)
55
+ elif isinstance(o, set):
56
+ return list(o)
57
+ else:
58
+ return str(o)
59
+
60
+
61
+ def get_wandb_printer() -> Literal["Printer"]:
62
+ """Returns a wandb printer instance for pretty stdout."""
63
+ from wandb.sdk.lib.printer import get_printer
64
+ from wandb.sdk.wandb_settings import Settings
65
+
66
+ printer = get_printer(Settings()._jupyter)
67
+ return printer
68
+
69
+
70
+ class WandbLogger:
71
+ def __init__(self, **kwargs) -> None:
72
+ """Attaches to wandb logger if already initialized. Otherwise, passes kwargs to wandb.init()
73
+
74
+ Args:
75
+ kwargs Optional[Any]: Arguments for configuration.
76
+
77
+ Parse and log the results returned from evaluator.simple_evaluate() with:
78
+ wandb_logger.post_init(results)
79
+ wandb_logger.log_eval_result()
80
+ wandb_logger.log_eval_samples(results["samples"])
81
+ """
82
+ try:
83
+ import wandb
84
+
85
+ assert Version(wandb.__version__) >= Version("0.13.6")
86
+ if Version(wandb.__version__) < Version("0.13.6"):
87
+ wandb.require("report-editing:v0")
88
+ except Exception as e:
89
+ logger.warning(
90
+ "To use the wandb reporting functionality please install wandb>=0.13.6.\n"
91
+ "To install the latest version of wandb run `pip install wandb --upgrade`\n"
92
+ f"{e}"
93
+ )
94
+
95
+ self.wandb_args: Dict[str, Any] = kwargs
96
+
97
+ # initialize a W&B run
98
+ if wandb.run is None:
99
+ self.run = wandb.init(**self.wandb_args)
100
+ else:
101
+ self.run = wandb.run
102
+
103
+ self.printer = get_wandb_printer()
104
+
105
+ def post_init(self, results: Dict[str, Any]) -> None:
106
+ self.results: Dict[str, Any] = copy.deepcopy(results)
107
+ self.task_names: List[str] = list(results.get("results", {}).keys())
108
+ self.group_names: List[str] = list(results.get("groups", {}).keys())
109
+
110
+ def _get_config(self) -> Dict[str, Any]:
111
+ """Get configuration parameters."""
112
+ self.task_configs = self.results.get("configs", {})
113
+ cli_configs = self.results.get("config", {})
114
+ configs = {
115
+ "task_configs": self.task_configs,
116
+ "cli_configs": cli_configs,
117
+ }
118
+
119
+ return configs
120
+
121
+ def _sanitize_results_dict(self) -> Tuple[Dict[str, str], Dict[str, Any]]:
122
+ """Sanitize the results dictionary."""
123
+ _results = copy.deepcopy(self.results.get("results", dict()))
124
+
125
+ # Remove None from the metric string name
126
+ tmp_results = copy.deepcopy(_results)
127
+ for task_name in self.task_names:
128
+ task_result = tmp_results.get(task_name, dict())
129
+ for metric_name, metric_value in task_result.items():
130
+ _metric_name, removed = remove_none_pattern(metric_name)
131
+ if removed:
132
+ _results[task_name][_metric_name] = metric_value
133
+ _results[task_name].pop(metric_name)
134
+
135
+ # remove string valued keys from the results dict
136
+ wandb_summary = {}
137
+ for task in self.task_names:
138
+ task_result = _results.get(task, dict())
139
+ for metric_name, metric_value in task_result.items():
140
+ if isinstance(metric_value, str):
141
+ wandb_summary[f"{task}/{metric_name}"] = metric_value
142
+
143
+ for summary_metric, summary_value in wandb_summary.items():
144
+ _task, _summary_metric = summary_metric.split("/")
145
+ _results[_task].pop(_summary_metric)
146
+
147
+ tmp_results = copy.deepcopy(_results)
148
+ for task_name, task_results in tmp_results.items():
149
+ for metric_name, metric_value in task_results.items():
150
+ _results[f"{task_name}/{metric_name}"] = metric_value
151
+ _results[task_name].pop(metric_name)
152
+ for task in self.task_names:
153
+ _results.pop(task)
154
+
155
+ return wandb_summary, _results
156
+
157
+ def _log_results_as_table(self) -> None:
158
+ """Generate and log evaluation results as a table to W&B."""
159
+ columns = [
160
+ "Version",
161
+ "Filter",
162
+ "num_fewshot",
163
+ "Metric",
164
+ "Value",
165
+ "Stderr",
166
+ ]
167
+
168
+ def make_table(columns: List[str], key: str = "results"):
169
+ import wandb
170
+
171
+ table = wandb.Table(columns=columns)
172
+ results = copy.deepcopy(self.results)
173
+
174
+ for k, dic in results.get(key).items():
175
+ if k in self.group_names and not key == "groups":
176
+ continue
177
+ version = results.get("versions").get(k)
178
+ if version == "N/A":
179
+ version = None
180
+ n = results.get("n-shot").get(k)
181
+
182
+ for (mf), v in dic.items():
183
+ m, _, f = mf.partition(",")
184
+ if m.endswith("_stderr"):
185
+ continue
186
+ if m == "alias":
187
+ continue
188
+
189
+ if m + "_stderr" + "," + f in dic:
190
+ se = dic[m + "_stderr" + "," + f]
191
+ if se != "N/A":
192
+ se = "%.4f" % se
193
+ table.add_data(*[k, version, f, n, m, str(v), str(se)])
194
+ else:
195
+ table.add_data(*[k, version, f, n, m, str(v), ""])
196
+
197
+ return table
198
+
199
+ # log the complete eval result to W&B Table
200
+ table = make_table(["Tasks"] + columns, "results")
201
+ self.run.log({"evaluation/eval_results": table})
202
+
203
+ if "groups" in self.results.keys():
204
+ table = make_table(["Groups"] + columns, "groups")
205
+ self.run.log({"evaluation/group_eval_results": table})
206
+
207
+ def _log_results_as_artifact(self) -> None:
208
+ """Log results as JSON artifact to W&B."""
209
+ import wandb
210
+
211
+ dumped = json.dumps(
212
+ self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False
213
+ )
214
+ artifact = wandb.Artifact("results", type="eval_results")
215
+ with artifact.new_file("results.json", mode="w", encoding="utf-8") as f:
216
+ f.write(dumped)
217
+ self.run.log_artifact(artifact)
218
+
219
+ def log_eval_result(self) -> None:
220
+ """Log evaluation results to W&B."""
221
+ # Log configs to wandb
222
+ configs = self._get_config()
223
+ self.run.config.update(configs)
224
+
225
+ wandb_summary, self.wandb_results = self._sanitize_results_dict()
226
+ # update wandb.run.summary with items that were removed
227
+ self.run.summary.update(wandb_summary)
228
+ # Log the evaluation metrics to wandb
229
+ self.run.log(self.wandb_results)
230
+ # Log the evaluation metrics as W&B Table
231
+ self._log_results_as_table()
232
+ # Log the results dict as json to W&B Artifacts
233
+ self._log_results_as_artifact()
234
+
235
+ def _generate_dataset(
236
+ self, data: List[Dict[str, Any]], config: Dict[str, Any]
237
+ ) -> pd.DataFrame:
238
+ """Generate a dataset from evaluation data.
239
+
240
+ Args:
241
+ data (List[Dict[str, Any]]): The data to generate a dataset for.
242
+ config (Dict[str, Any]): The configuration of the task.
243
+
244
+ Returns:
245
+ pd.DataFrame: A dataframe that is ready to be uploaded to W&B.
246
+ """
247
+ ids = [x["doc_id"] for x in data]
248
+ labels = [x["target"] for x in data]
249
+ instance = [""] * len(ids)
250
+ resps = [""] * len(ids)
251
+ filtered_resps = [""] * len(ids)
252
+ model_outputs = {}
253
+
254
+ metrics_list = config["metric_list"]
255
+ metrics = {}
256
+ for metric in metrics_list:
257
+ metric = metric.get("metric")
258
+ if metric in ["word_perplexity", "byte_perplexity", "bits_per_byte"]:
259
+ metrics[f"{metric}_loglikelihood"] = [x[metric][0] for x in data]
260
+ if metric in ["byte_perplexity", "bits_per_byte"]:
261
+ metrics[f"{metric}_bytes"] = [x[metric][1] for x in data]
262
+ else:
263
+ metrics[f"{metric}_words"] = [x[metric][1] for x in data]
264
+ else:
265
+ metrics[metric] = [x[metric] for x in data]
266
+
267
+ if config["output_type"] == "loglikelihood":
268
+ instance = [x["arguments"][0][0] for x in data]
269
+ labels = [x["arguments"][0][1] for x in data]
270
+ resps = [
271
+ f'log probability of continuation is {x["resps"][0][0][0]} '
272
+ + "\n\n"
273
+ + "continuation will {} generated with greedy sampling".format(
274
+ "not be" if not x["resps"][0][0][1] else "be"
275
+ )
276
+ for x in data
277
+ ]
278
+ filtered_resps = [
279
+ f'log probability of continuation is {x["filtered_resps"][0][0]} '
280
+ + "\n\n"
281
+ + "continuation will {} generated with greedy sampling".format(
282
+ "not be" if not x["filtered_resps"][0][1] else "be"
283
+ )
284
+ for x in data
285
+ ]
286
+ elif config["output_type"] == "multiple_choice":
287
+ instance = [x["arguments"][0][0] for x in data]
288
+ choices = [
289
+ "\n".join([f"{idx}. {y[1]}" for idx, y in enumerate(x["arguments"])])
290
+ for x in data
291
+ ]
292
+ resps = [np.argmax([n[0][0] for n in x["resps"]]) for x in data]
293
+ filtered_resps = [
294
+ np.argmax([n[0] for n in x["filtered_resps"]]) for x in data
295
+ ]
296
+ elif config["output_type"] == "loglikelihood_rolling":
297
+ instance = [x["arguments"][0][0] for x in data]
298
+ resps = [x["resps"][0][0] for x in data]
299
+ filtered_resps = [x["filtered_resps"][0] for x in data]
300
+ elif config["output_type"] == "generate_until":
301
+ instance = [x["arguments"][0][0] for x in data]
302
+ resps = [x["resps"][0][0] for x in data]
303
+ filtered_resps = [x["filtered_resps"][0] for x in data]
304
+
305
+ model_outputs["raw_predictions"] = resps
306
+ model_outputs["filtered_predictions"] = filtered_resps
307
+
308
+ df_data = {
309
+ "id": ids,
310
+ "data": instance,
311
+ }
312
+ if config["output_type"] == "multiple_choice":
313
+ df_data["choices"] = choices
314
+
315
+ tmp_data = {
316
+ "input_len": [len(x) for x in instance],
317
+ "labels": labels,
318
+ "output_type": config["output_type"],
319
+ }
320
+ df_data.update(tmp_data)
321
+ df_data.update(model_outputs)
322
+ df_data.update(metrics)
323
+
324
+ return pd.DataFrame(df_data)
325
+
326
+ def _log_samples_as_artifact(
327
+ self, data: List[Dict[str, Any]], task_name: str
328
+ ) -> None:
329
+ import wandb
330
+
331
+ # log the samples as an artifact
332
+ dumped = json.dumps(
333
+ data,
334
+ indent=2,
335
+ default=_handle_non_serializable,
336
+ ensure_ascii=False,
337
+ )
338
+ artifact = wandb.Artifact(f"{task_name}", type="samples_by_task")
339
+ with artifact.new_file(
340
+ f"{task_name}_eval_samples.json", mode="w", encoding="utf-8"
341
+ ) as f:
342
+ f.write(dumped)
343
+ self.run.log_artifact(artifact)
344
+ # artifact.wait()
345
+
346
+ def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None:
347
+ """Log evaluation samples to W&B.
348
+
349
+ Args:
350
+ samples (Dict[str, List[Dict[str, Any]]]): Evaluation samples for each task.
351
+ """
352
+ task_names: List[str] = [
353
+ x for x in self.task_names if x not in self.group_names
354
+ ]
355
+
356
+ ungrouped_tasks = []
357
+ tasks_by_groups = {}
358
+
359
+ for task_name in task_names:
360
+ group_names = self.task_configs[task_name].get("group", None)
361
+ if group_names:
362
+ if isinstance(group_names, str):
363
+ group_names = [group_names]
364
+
365
+ for group_name in group_names:
366
+ if not tasks_by_groups.get(group_name):
367
+ tasks_by_groups[group_name] = [task_name]
368
+ else:
369
+ tasks_by_groups[group_name].append(task_name)
370
+ else:
371
+ ungrouped_tasks.append(task_name)
372
+
373
+ for task_name in ungrouped_tasks:
374
+ eval_preds = samples[task_name]
375
+
376
+ # log the samples as a W&B Table
377
+ df = self._generate_dataset(eval_preds, self.task_configs.get(task_name))
378
+ self.run.log({f"{task_name}_eval_results": df})
379
+
380
+ # log the samples as a json file as W&B Artifact
381
+ self._log_samples_as_artifact(eval_preds, task_name)
382
+
383
+ for group, grouped_tasks in tasks_by_groups.items():
384
+ grouped_df = pd.DataFrame()
385
+ for task_name in grouped_tasks:
386
+ eval_preds = samples[task_name]
387
+ df = self._generate_dataset(
388
+ eval_preds, self.task_configs.get(task_name)
389
+ )
390
+ df["group"] = group
391
+ df["task"] = task_name
392
+ grouped_df = pd.concat([grouped_df, df], ignore_index=True)
393
+
394
+ # log the samples as a json file as W&B Artifact
395
+ self._log_samples_as_artifact(eval_preds, task_name)
396
+
397
+ self.run.log({f"{group}_eval_results": grouped_df})
398
+
399
+
400
+ def get_commit_from_path(repo_path: Union[Path, str]) -> Optional[str]:
401
+ try:
402
+ git_folder = Path(repo_path, ".git")
403
+ if git_folder.is_file():
404
+ git_folder = Path(
405
+ git_folder.parent,
406
+ git_folder.read_text(encoding="utf-8").split("\n")[0].split(" ")[-1],
407
+ )
408
+ if Path(git_folder, "HEAD").exists():
409
+ head_name = (
410
+ Path(git_folder, "HEAD")
411
+ .read_text(encoding="utf-8")
412
+ .split("\n")[0]
413
+ .split(" ")[-1]
414
+ )
415
+ head_ref = Path(git_folder, head_name)
416
+ git_hash = head_ref.read_text(encoding="utf-8").replace("\n", "")
417
+ else:
418
+ git_hash = None
419
+ except Exception as err:
420
+ logger.debug(
421
+ f"Failed to retrieve a Git commit hash from path: {str(repo_path)}. Error: {err}"
422
+ )
423
+ return None
424
+ return git_hash
425
+
426
+
427
+ def get_git_commit_hash():
428
+ """
429
+ Gets the git commit hash of your current repo (if it exists).
430
+ Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42
431
+ """
432
+ try:
433
+ git_hash = subprocess.check_output(["git", "describe", "--always"]).strip()
434
+ git_hash = git_hash.decode()
435
+ except (subprocess.CalledProcessError, FileNotFoundError):
436
+ # FileNotFoundError occurs when git not installed on system
437
+ git_hash = get_commit_from_path(os.getcwd()) # git hash of repo if exists
438
+ return git_hash
439
+
440
+
441
+ def add_env_info(storage: Dict[str, Any]):
442
+ try:
443
+ pretty_env_info = get_pretty_env_info()
444
+ except Exception as err:
445
+ pretty_env_info = str(err)
446
+ transformers_version = trans_version
447
+ upper_dir_commit = get_commit_from_path(
448
+ Path(os.getcwd(), "..")
449
+ ) # git hash of upper repo if exists
450
+ added_info = {
451
+ "pretty_env_info": pretty_env_info,
452
+ "transformers_version": transformers_version,
453
+ "upper_git_hash": upper_dir_commit, # in case this repo is submodule
454
+ }
455
+ storage.update(added_info)
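
The `WandbLogger` docstring spells out the intended calling sequence; a hedged sketch of wiring it to `evaluator.simple_evaluate` output (the model and project values are placeholders):

```python
# Sketch: log simple_evaluate() results to Weights & Biases, following the
# call order documented in WandbLogger's docstring. Values are illustrative.
from lm_eval import evaluator
from lm_eval.logging_utils import WandbLogger

results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["babi"],
    log_samples=True,  # needed so results contains a "samples" entry
)

wandb_logger = WandbLogger(project="lm-eval", job_type="eval")  # kwargs go to wandb.init
wandb_logger.post_init(results)
wandb_logger.log_eval_result()
wandb_logger.log_eval_samples(results["samples"])
wandb_logger.run.finish()
```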
lm-evaluation/lm_eval/tasks/__init__.py ADDED
@@ -0,0 +1,446 @@
1
+ import collections
2
+ import logging
3
+ import os
4
+ from functools import partial
5
+ from typing import Dict, List, Mapping, Optional, Union
6
+
7
+ from lm_eval import utils
8
+ from lm_eval.api.task import ConfigurableTask, Task
9
+
10
+
11
+ class TaskManager:
12
+ """TaskManager indexes all tasks from the default `lm_eval/tasks/`
13
+ and an optional directory if provided.
14
+
15
+ """
16
+
17
+ def __init__(self, verbosity="INFO", include_path: Optional[str] = None) -> None:
18
+ self.verbosity = verbosity
19
+ self.include_path = include_path
20
+ self.logger = utils.eval_logger
21
+ self.logger.setLevel(getattr(logging, f"{verbosity}"))
22
+
23
+ self._task_index = self.initialize_tasks(include_path=include_path)
24
+ self._all_tasks = sorted(list(self._task_index.keys()))
25
+
26
+ self.task_group_map = collections.defaultdict(list)
27
+
28
+ def initialize_tasks(self, include_path: Optional[str] = None):
29
+ """Creates a dictionary of tasks index.
30
+
31
+ :param include_path: str = None
32
+ An additional path to be searched for tasks
33
+
34
+ :return
35
+ Dictionary of task names as key and task metadata
36
+ """
37
+ all_paths = [os.path.dirname(os.path.abspath(__file__)) + "/"]
38
+ if include_path is not None:
39
+ if isinstance(include_path, str):
40
+ include_path = [include_path]
41
+ all_paths.extend(include_path)
42
+
43
+ task_index = {}
44
+ for task_dir in all_paths:
45
+ tasks = self._get_task_and_group(task_dir)
46
+ task_index = {**tasks, **task_index}
47
+
48
+ return task_index
49
+
50
+ @property
51
+ def all_tasks(self):
52
+ return self._all_tasks
53
+
54
+ @property
55
+ def task_index(self):
56
+ return self._task_index
57
+
58
+ def match_tasks(self, task_list):
59
+ return utils.pattern_match(task_list, self.all_tasks)
60
+
61
+ def _name_is_registered(self, name) -> bool:
62
+ if name in self.all_tasks:
63
+ return True
64
+ return False
65
+
66
+ def _name_is_task(self, name) -> bool:
67
+ if self._name_is_registered(name) and ("task" in self.task_index[name]["type"]):
68
+ return True
69
+ return False
70
+
71
+ def _name_is_group(self, name) -> bool:
72
+ if self._name_is_registered(name) and (
73
+ self.task_index[name]["type"] == "group"
74
+ ):
75
+ return True
76
+ return False
77
+
78
+ def _name_is_python_task(self, name):
79
+ if self._name_is_registered(name) and (
80
+ self.task_index[name]["type"] == "python_task"
81
+ ):
82
+ return True
83
+ return False
84
+
85
+ def _config_is_task(self, config) -> bool:
86
+ if ("task" in config) and isinstance(config["task"], str):
87
+ return True
88
+ return False
89
+
90
+ def _config_is_group(self, config) -> bool:
91
+ if ("task" in config) and isinstance(config["task"], list):
92
+ return True
93
+ return False
94
+
95
+ def _config_is_python_task(self, config) -> bool:
96
+ if "class" in config:
97
+ return True
98
+ return False
99
+
100
+ def _get_yaml_path(self, name):
101
+ if name not in self.task_index:
102
+ raise ValueError
103
+ return self.task_index[name]["yaml_path"]
104
+
105
+ def _get_config(self, name):
106
+ if name not in self.task_index:
107
+ raise ValueError
108
+ yaml_path = self._get_yaml_path(name)
109
+ if yaml_path == -1:
110
+ return {}
111
+ else:
112
+ return utils.load_yaml_config(yaml_path, mode="full")
113
+
114
+ def _get_tasklist(self, name):
115
+ if self._name_is_task(name):
116
+ raise ValueError
117
+ return self.task_index[name]["task"]
118
+
119
+ def _process_alias(self, config, group=None):
120
+ # If the group is not the same as the original
121
+ # group which the group alias was intended for,
122
+ # Set the group_alias to None instead.
123
+ if ("group_alias" in config) and ("group" in config) and group is not None:
124
+ if config["group"] != group:
125
+ config["group_alias"] = None
126
+ return config
127
+
128
+ def _load_individual_task_or_group(
129
+ self,
130
+ name_or_config: Optional[Union[str, dict]] = None,
131
+ parent_name: Optional[str] = None,
132
+ update_config: Optional[dict] = None,
133
+ yaml_path: Optional[str] = None,
134
+ ) -> Mapping:
135
+ def load_task(config, task, group=None, yaml_path=None):
136
+ if "include" in config:
137
+ if yaml_path is None:
138
+ raise ValueError
139
+ config.update(
140
+ utils.load_yaml_config(
141
+ yaml_path,
142
+ yaml_config={"include": config.pop("include")},
143
+ mode="full",
144
+ )
145
+ )
146
+ if self._config_is_python_task(config):
147
+ task_object = config["class"]()
148
+ else:
149
+ config = self._process_alias(config, group=group)
150
+ task_object = ConfigurableTask(config=config)
151
+ if group is not None:
152
+ task_object = (group, task_object)
153
+ return {task: task_object}
154
+
155
+ if isinstance(name_or_config, str):
156
+ if update_config is not None:
157
+ # Process name_or_config as a dict instead
158
+ name_or_config = {"task": name_or_config, **update_config}
159
+ elif self._name_is_task(name_or_config):
160
+ task_config = self._get_config(name_or_config)
161
+ return load_task(task_config, task=name_or_config, group=parent_name)
162
+ else:
163
+ group_name = name_or_config
164
+ subtask_list = self._get_tasklist(name_or_config)
165
+ if subtask_list == -1:
166
+ group_config = self._get_config(name_or_config)
167
+ subtask_list = group_config["task"]
168
+
169
+ # This checks if we're at the root.
170
+ if parent_name is None:
171
+ group_config = self._get_config(name_or_config)
172
+ if set(group_config.keys()) > {"task", "group"}:
173
+ update_config = {
174
+ k: v
175
+ for k, v in group_config.items()
176
+ if k not in ["task", "group"]
177
+ }
178
+ yaml_path = self._get_yaml_path(group_name)
179
+
180
+ if (update_config is not None) and ("group_alias" in update_config):
181
+ group_name = update_config["group_alias"]
182
+ update_config.pop("group_alias")
183
+
184
+ if isinstance(name_or_config, dict):
185
+ if update_config is not None:
186
+ name_or_config = {
187
+ **name_or_config,
188
+ **update_config,
189
+ }
190
+
191
+ if self._config_is_task(name_or_config):
192
+ name = name_or_config["task"]
193
+ # If the name is registered as a group
194
+ # if self._name_is_task(name) is False:
195
+ if self._name_is_group(name):
196
+ group_name = name
197
+ update_config = {
198
+ k: v for k, v in name_or_config.items() if k != "task"
199
+ }
200
+ subtask_list = self._get_tasklist(name)
201
+ if subtask_list == -1:
202
+ subtask_list = self._get_config(name)["task"]
203
+ else:
204
+ if self._name_is_registered(name):
205
+ base_task_config = self._get_config(name)
206
+
207
+ # Check if this is a duplicate.
208
+ if parent_name is not None:
209
+ name_or_config["group"] = parent_name
210
+ num_duplicate = len(
211
+ list(
212
+ filter(
213
+ lambda x: x.startswith(name),
214
+ self.task_group_map[parent_name],
215
+ )
216
+ )
217
+ )
218
+ if num_duplicate > 0:
219
+ name = f"{name}-{num_duplicate}"
220
+ self.task_group_map[parent_name].append(name)
221
+
222
+ task_config = {
223
+ **base_task_config,
224
+ **name_or_config,
225
+ }
226
+ else:
227
+ task_config = name_or_config
228
+ return load_task(
229
+ task_config, task=name, group=parent_name, yaml_path=yaml_path
230
+ )
231
+ else:
232
+ group_name = name_or_config["group"]
233
+ subtask_list = name_or_config["task"]
234
+ if set(name_or_config.keys()) > {"task", "group"}:
235
+ update_config = {
236
+ k: v
237
+ for k, v in name_or_config.items()
238
+ if k not in ["task", "group"]
239
+ }
240
+
241
+ all_subtasks = {}
242
+ if parent_name is not None:
243
+ all_subtasks = {group_name: (parent_name, None)}
244
+
245
+ fn = partial(
246
+ self._load_individual_task_or_group,
247
+ parent_name=group_name,
248
+ update_config=update_config,
249
+ yaml_path=yaml_path,
250
+ )
251
+ all_subtasks = {
252
+ **all_subtasks,
253
+ **dict(collections.ChainMap(*map(fn, subtask_list))),
254
+ }
255
+ return all_subtasks
256
+
257
+ def load_task_or_group(self, task_list: Optional[Union[str, list]] = None) -> dict:
258
+ """Loads a dictionary of task objects from a list
259
+
260
+ :param task_list: Union[str, list] = None
261
+ Single string or list of string of task names to be loaded
262
+
263
+ :return
264
+ Dictionary of task objects
265
+ """
266
+ if isinstance(task_list, str):
267
+ task_list = [task_list]
268
+
269
+ all_loaded_tasks = dict(
270
+ collections.ChainMap(*map(self._load_individual_task_or_group, task_list))
271
+ )
272
+ return all_loaded_tasks
273
+
274
+ def load_config(self, config: Dict):
275
+ return self._load_individual_task_or_group(config)
276
+
277
+ def _get_task_and_group(self, task_dir: str):
278
+ """Creates a dictionary of tasks index with the following metadata,
279
+ - `type`, that can be either `task`, `python_task`, or `group`.
280
+ `task` refer to regular task configs, `python_task` are special
281
+ yaml files that only consists of `task` and `class` parameters.
282
+ `group` are group configs.
283
+ - `yaml_path`, path to the yaml file. If the entry is a `group` that
284
+ was configured through a task config, the yaml_path will be -1
285
+ and all subtasks will be listed in `task` (see below)
286
+ - `task`, reserved for entries with `type` as `group`. This will list
287
+ all subtasks. When a group config is created (as opposed to task
288
+ config having `group` parameter set), this will be set to -1 to
289
+ avoid recursive indexing. The whole list of subtasks will be loaded
290
+ at evaluation.
291
+
292
+ :param task_dir: str
293
+ A directory to check for tasks
294
+
295
+ :return
296
+ Dictionary of task names as key and task metadata
297
+ """
298
+ tasks_and_groups = collections.defaultdict()
299
+ for root, _, file_list in os.walk(task_dir):
300
+ for f in file_list:
301
+ if f.endswith(".yaml"):
302
+ yaml_path = os.path.join(root, f)
303
+ config = utils.load_yaml_config(yaml_path, mode="simple")
304
+ if self._config_is_python_task(config):
305
+ # This is a python class config
306
+ tasks_and_groups[config["task"]] = {
307
+ "type": "python_task",
308
+ "yaml_path": yaml_path,
309
+ }
310
+ elif self._config_is_group(config):
311
+ # This is a group config
312
+ tasks_and_groups[config["group"]] = {
313
+ "type": "group",
314
+ "task": -1, # This signals that
315
+ # we don't need to know
316
+ # the task list for indexing
317
+ # as it can be loaded
318
+ # when called.
319
+ "yaml_path": yaml_path,
320
+ }
321
+
322
+ # # Registered the level 1 tasks from a group config
323
+ # for config in config["task"]:
324
+ # if isinstance(config, dict) and self._config_is_task(config):
325
+ # task = config["task"]
326
+ # tasks_and_groups[task] = {
327
+ # "type": "task",
328
+ # "yaml_path": yaml_path,
329
+ # }
330
+
331
+ elif self._config_is_task(config):
332
+ # This is a task config
333
+ task = config["task"]
334
+ tasks_and_groups[task] = {
335
+ "type": "task",
336
+ "yaml_path": yaml_path,
337
+ }
338
+
339
+ if "group" in config:
340
+ groups = config["group"]
341
+ if isinstance(config["group"], str):
342
+ groups = [groups]
343
+
344
+ for group in groups:
345
+ if group not in tasks_and_groups:
346
+ tasks_and_groups[group] = {
347
+ "type": "group",
348
+ "task": [task],
349
+ "yaml_path": -1,
350
+ }
351
+ else:
352
+ tasks_and_groups[group]["task"].append(task)
353
+ else:
354
+ self.logger.debug(f"File {f} in {root} could not be loaded")
355
+
356
+ return tasks_and_groups
357
+
358
+
359
+ def get_task_name_from_config(task_config: Dict[str, str]) -> str:
360
+ if "task" in task_config:
361
+ return task_config["task"]
362
+ if "dataset_name" in task_config:
363
+ return "{dataset_path}_{dataset_name}".format(**task_config)
364
+ else:
365
+ return "{dataset_path}".format(**task_config)
366
+
367
+
368
+ def get_task_name_from_object(task_object):
369
+ if hasattr(task_object, "config"):
370
+ return task_object._config["task"]
371
+
372
+ # TODO: scrap this
373
+ # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting
374
+ return (
375
+ task_object.EVAL_HARNESS_NAME
376
+ if hasattr(task_object, "EVAL_HARNESS_NAME")
377
+ else type(task_object).__name__
378
+ )
379
+
380
+
381
+ def get_task_dict(
382
+ task_name_list: Union[str, List[Union[str, Dict, Task]]],
383
+ task_manager: Optional[TaskManager] = None,
384
+ ):
385
+ """Creates a dictionary of task objects from either a name of task, config, or prepared Task object.
386
+
387
+ :param task_name_list: List[Union[str, Dict, Task]]
388
+ Name of model or LM object, see lm_eval.models.get_model
389
+ :param task_manager: TaskManager = None
390
+ A TaskManager object that stores indexed tasks. If not set,
391
+ task_manager will load one. This should be set by the user
392
+ if there are additional paths that want to be included
393
+ via `include_path`
394
+
395
+ :return
396
+ Dictionary of task objects
397
+ """
398
+ task_name_from_string_dict = {}
399
+ task_name_from_config_dict = {}
400
+ task_name_from_object_dict = {}
401
+
402
+ if isinstance(task_name_list, str):
403
+ task_name_list = [task_name_list]
404
+ elif isinstance(task_name_list, list):
405
+ if not all([isinstance(task, (str, dict, Task)) for task in task_name_list]):
406
+ raise TypeError(
407
+ "Expected all list items to be of types 'str', 'dict', or 'Task', but at least one entry did not match."
408
+ )
409
+ else:
410
+ raise TypeError(
411
+ f"Expected a 'str' or 'list' but received {type(task_name_list)}."
412
+ )
413
+
414
+ string_task_name_list = [task for task in task_name_list if isinstance(task, str)]
415
+ others_task_name_list = [task for task in task_name_list if not isinstance(task, str)]
416
+ if len(string_task_name_list) > 0:
417
+ if task_manager is None:
418
+ task_manager = TaskManager()
419
+
420
+ task_name_from_string_dict = task_manager.load_task_or_group(
421
+ string_task_name_list
422
+ )
423
+
424
+ for task_element in others_task_name_list:
425
+ if isinstance(task_element, dict):
426
+ task_name_from_config_dict = {
427
+ **task_name_from_config_dict,
428
+ **task_manager.load_config(config=task_element),
429
+ }
430
+
431
+ elif isinstance(task_element, Task):
432
+ task_name_from_object_dict = {
433
+ **task_name_from_object_dict,
434
+ get_task_name_from_object(task_element): task_element,
435
+ }
436
+
437
+ if not set(task_name_from_string_dict.keys()).isdisjoint(
438
+ set(task_name_from_object_dict.keys())
439
+ ):
440
+ raise ValueError
441
+
442
+ return {
443
+ **task_name_from_string_dict,
444
+ **task_name_from_config_dict,
445
+ **task_name_from_object_dict,
446
+ }
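
A short sketch of how the pieces above fit together, indexing tasks, resolving name patterns, and building task objects (the external path is illustrative):

```python
# Sketch: index built-in and external task YAMLs, then build task objects.
from lm_eval.tasks import TaskManager, get_task_dict

task_manager = TaskManager(verbosity="INFO", include_path="/path/to/extra/tasks")  # include_path is optional

print(task_manager.all_tasks[:5])                  # sorted names of every indexed task/group
matched = task_manager.match_tasks(["blimp_*"])    # wildcard patterns resolve against the index
task_dict = get_task_dict(matched, task_manager)   # maps task names to loaded task objects
```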
lm-evaluation/lm_eval/tasks/babi/README.md ADDED
@@ -0,0 +1,45 @@
1
+ # bAbI
2
+
3
+ ### Paper
4
+
5
+ Title: Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks
6
+ Abstract: https://arxiv.org/abs/1502.05698
7
+
8
+ One long-term goal of machine learning research is to produce methods that are applicable to reasoning and natural language, in particular building an intelligent dialogue agent. To measure progress towards that goal, we argue for the usefulness of a set of proxy tasks that evaluate reading comprehension via question answering. Our tasks measure understanding in several ways: whether a system is able to answer questions via chaining facts, simple induction, deduction and many more. The tasks are designed to be prerequisites for any system that aims to be capable of conversing with a human. We believe many existing learning systems can currently not solve them, and hence our aim is to classify these tasks into skill sets, so that researchers can identify (and then rectify) the failings of their systems. We also extend and improve the recently introduced Memory Networks model, and show it is able to solve some, but not all, of the tasks.
9
+
10
+ Homepage: https://github.com/facebookarchive/bAbI-tasks
11
+
12
+
13
+ ### Citation
14
+
15
+ ```
16
+ @article{weston2015towards,
17
+ title={Towards ai-complete question answering: A set of prerequisite toy tasks},
18
+ author={Weston, Jason and Bordes, Antoine and Chopra, Sumit and Rush, Alexander M and Van Merri{\"e}nboer, Bart and Joulin, Armand and Mikolov, Tomas},
19
+ journal={arXiv preprint arXiv:1502.05698},
20
+ year={2015}
21
+ }
22
+ ```
23
+
24
+ ### Groups and Tasks
25
+
26
+ #### Groups
27
+
28
+ * Not part of a group yet
29
+
30
+ #### Tasks
31
+
32
+ * `babi`
33
+
34
+ ### Checklist
35
+
36
+ For adding novel benchmarks/datasets to the library:
37
+ * [ ] Is the task an existing benchmark in the literature?
38
+ * [ ] Have you referenced the original paper that introduced the task?
39
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
40
+
41
+
42
+ If other tasks on this dataset are already supported:
43
+ * [ ] Is the "Main" variant of this task clearly denoted?
44
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
45
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/babi/babi.yaml ADDED
@@ -0,0 +1,20 @@
1
+ task: babi
2
+ dataset_path: Muennighoff/babi
3
+ dataset_name: null
4
+ output_type: generate_until
5
+ training_split: train
6
+ validation_split: valid
7
+ test_split: test
8
+ doc_to_text: "Passage: {{passage}}Question: {{question}}\nAnswer:"
9
+ doc_to_target: " {{answer}}"
10
+ target_delimiter: ""
11
+ generation_kwargs:
12
+ until:
13
+ - "\n"
14
+ - "Passage:"
15
+ metric_list:
16
+ - metric: exact_match
17
+ aggregation: mean
18
+ higher_is_better: true
19
+ metadata:
20
+ version: 1.0
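
Configs like this one are normally picked up by name from the task index, but `--tasks` also accepts a path to a YAML file, which `cli_evaluate` loads with `utils.load_yaml_config`. A sketch of that path (the file location shown is illustrative):

```python
# Sketch: load a task YAML directly and turn it into a task object,
# mirroring the file-path branch of --tasks handling in __main__.py.
from lm_eval import utils
from lm_eval.tasks import TaskManager

config = utils.load_yaml_config("lm-evaluation/lm_eval/tasks/babi/babi.yaml")
task_dict = TaskManager().load_config(config)  # e.g. {"babi": <ConfigurableTask>}
```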
lm-evaluation/lm_eval/tasks/blimp/README.md ADDED
@@ -0,0 +1,52 @@
1
+ # BLiMP
2
+
3
+ ### Paper
4
+
5
+ Title: `BLiMP: The Benchmark of Linguistic Minimal Pairs for English`
6
+ Abstract: `https://arxiv.org/abs/1912.00582`
7
+
8
+ BLiMP is a challenge set for evaluating what language models (LMs) know about
9
+ major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each
10
+ containing 1000 minimal pairs isolating specific contrasts in syntax, morphology,
11
+ or semantics. The data is automatically generated according to expert-crafted
12
+ grammars.
13
+
14
+ Homepage: https://github.com/alexwarstadt/blimp
15
+
16
+
17
+ ### Citation
18
+
19
+ ```
20
+ @article{warstadt2019blimp,
21
+ author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.},
22
+ title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English},
23
+ journal = {Transactions of the Association for Computational Linguistics},
24
+ volume = {8},
25
+ number = {},
26
+ pages = {377-392},
27
+ year = {2020},
28
+ doi = {10.1162/tacl\_a\_00321},
29
+ URL = {https://doi.org/10.1162/tacl_a_00321},
30
+ eprint = {https://doi.org/10.1162/tacl_a_00321},
31
+ abstract = { We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP),1 a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs—that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4\%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands. }
32
+ }
33
+ ```
34
+
35
+ ### Subtasks
36
+
37
+ List or describe tasks defined in this folder, and their names here:
38
+ * `task_name`: `1-sentence description of what this particular task does`
39
+ * `task_name2`: .....
40
+
41
+ ### Checklist
42
+
43
+ For adding novel benchmarks/datasets to the library:
44
+ * [ ] Is the task an existing benchmark in the literature?
45
+ * [ ] Have you referenced the original paper that introduced the task?
46
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
47
+
48
+
49
+ If other tasks on this dataset are already supported:
50
+ * [ ] Is the "Main" variant of this task clearly denoted?
51
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
52
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
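Since every subtask is registered under the `blimp` group (defined in `_template_yaml` below), the tasks can be run individually or all at once. A hedged sketch of invoking one subtask through the harness's Python entry point, assuming the v0.4-style `simple_evaluate` API; the model choice (`pretrained=gpt2`) is an arbitrary example:

```python
# Hedged sketch, not an official recipe: run a single blimp subtask (or the
# whole "blimp" group) with lm-evaluation-harness's simple_evaluate API.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["blimp_adjunct_island"],  # or ["blimp"] for all 67 subtasks
)
print(results["results"])
```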
lm-evaluation/lm_eval/tasks/blimp/_template_yaml ADDED
@@ -0,0 +1,14 @@
+ group: blimp
+ dataset_path: blimp
+ output_type: multiple_choice
+ validation_split: train
+ doc_to_text: ""
+ doc_to_target: 0
+ doc_to_choice: "{{[sentence_good, sentence_bad]}}"
+ num_fewshot: 0
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{sentence_good}} {{sentence_bad}}"
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
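The template above treats each BLiMP item as a two-way `multiple_choice` question with an empty context: the choices are `[sentence_good, sentence_bad]` and `doc_to_target: 0` marks the grammatical sentence as correct, so an item counts toward `acc` when the model assigns higher total log-probability to `sentence_good`. A standalone sketch of that comparison (not the harness's internal code); `gpt2` and the example pair are arbitrary illustrations in the BLiMP style:

```python
# Sketch of the comparison implied by the multiple_choice setup: pick whichever
# of {sentence_good, sentence_bad} gets the higher summed token log-probability.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

def sentence_logprob(text: str) -> float:
    ids = tok(text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(ids).logits
    # Log-probability of each token given the preceding tokens.
    logprobs = torch.log_softmax(logits[:, :-1], dim=-1)
    token_lp = logprobs.gather(-1, ids[:, 1:].unsqueeze(-1)).squeeze(-1)
    return token_lp.sum().item()

pair = {  # illustrative minimal pair, not drawn from the dataset itself
    "sentence_good": "Who should Derek hug after shocking the artists?",
    "sentence_bad": "Who should Derek hug the artists after shocking?",
}
is_correct = sentence_logprob(pair["sentence_good"]) > sentence_logprob(pair["sentence_bad"])
print("correct" if is_correct else "incorrect")
```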
lm-evaluation/lm_eval/tasks/blimp/adjunct_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: adjunct_island
+ include: _template_yaml
+ task: blimp_adjunct_island
lm-evaluation/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: anaphor_gender_agreement
+ include: _template_yaml
+ task: blimp_anaphor_gender_agreement
lm-evaluation/lm_eval/tasks/blimp/anaphor_number_agreement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: anaphor_number_agreement
+ include: _template_yaml
+ task: blimp_anaphor_number_agreement
lm-evaluation/lm_eval/tasks/blimp/animate_subject_passive.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: animate_subject_passive
+ include: _template_yaml
+ task: blimp_animate_subject_passive
lm-evaluation/lm_eval/tasks/blimp/causative.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: causative
+ include: _template_yaml
+ task: blimp_causative
lm-evaluation/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: coordinate_structure_constraint_complex_left_branch
+ include: _template_yaml
+ task: blimp_coordinate_structure_constraint_complex_left_branch
lm-evaluation/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: coordinate_structure_constraint_object_extraction
+ include: _template_yaml
+ task: blimp_coordinate_structure_constraint_object_extraction
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_1
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_2
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_irregular_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_irregular_1
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_irregular_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_irregular_2
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_2
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adjective_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adjective_1
lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: distractor_agreement_relative_clause
+ include: _template_yaml
+ task: blimp_distractor_agreement_relative_clause
lm-evaluation/lm_eval/tasks/blimp/drop_argument.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: drop_argument
+ include: _template_yaml
+ task: blimp_drop_argument
lm-evaluation/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ellipsis_n_bar_1
+ include: _template_yaml
+ task: blimp_ellipsis_n_bar_1
lm-evaluation/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ellipsis_n_bar_2
+ include: _template_yaml
+ task: blimp_ellipsis_n_bar_2
lm-evaluation/lm_eval/tasks/blimp/existential_there_object_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_object_raising
+ include: _template_yaml
+ task: blimp_existential_there_object_raising
lm-evaluation/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_quantifiers_1
+ include: _template_yaml
+ task: blimp_existential_there_quantifiers_1
lm-evaluation/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_quantifiers_2
+ include: _template_yaml
+ task: blimp_existential_there_quantifiers_2
lm-evaluation/lm_eval/tasks/blimp/expletive_it_object_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: expletive_it_object_raising
+ include: _template_yaml
+ task: blimp_expletive_it_object_raising
lm-evaluation/lm_eval/tasks/blimp/generate_configs.py ADDED
@@ -0,0 +1,94 @@
+ import yaml
+
+
+ all_subtasks = [
+     "adjunct_island",
+     "anaphor_gender_agreement",
+     "anaphor_number_agreement",
+     "animate_subject_passive",
+     "animate_subject_trans",
+     "causative",
+     "complex_NP_island",
+     "coordinate_structure_constraint_complex_left_branch",
+     "coordinate_structure_constraint_object_extraction",
+     "determiner_noun_agreement_1",
+     "determiner_noun_agreement_2",
+     "determiner_noun_agreement_irregular_1",
+     "determiner_noun_agreement_irregular_2",
+     "determiner_noun_agreement_with_adj_2",
+     "determiner_noun_agreement_with_adj_irregular_1",
+     "determiner_noun_agreement_with_adj_irregular_2",
+     "determiner_noun_agreement_with_adjective_1",
+     "distractor_agreement_relational_noun",
+     "distractor_agreement_relative_clause",
+     "drop_argument",
+     "ellipsis_n_bar_1",
+     "ellipsis_n_bar_2",
+     "existential_there_object_raising",
+     "existential_there_quantifiers_1",
+     "existential_there_quantifiers_2",
+     "existential_there_subject_raising",
+     "expletive_it_object_raising",
+     "inchoative",
+     "intransitive",
+     "irregular_past_participle_adjectives",
+     "irregular_past_participle_verbs",
+     "irregular_plural_subject_verb_agreement_1",
+     "irregular_plural_subject_verb_agreement_2",
+     "left_branch_island_echo_question",
+     "left_branch_island_simple_question",
+     "matrix_question_npi_licensor_present",
+     "npi_present_1",
+     "npi_present_2",
+     "only_npi_licensor_present",
+     "only_npi_scope",
+     "passive_1",
+     "passive_2",
+     "principle_A_c_command",
+     "principle_A_case_1",
+     "principle_A_case_2",
+     "principle_A_domain_1",
+     "principle_A_domain_2",
+     "principle_A_domain_3",
+     "principle_A_reconstruction",
+     "regular_plural_subject_verb_agreement_1",
+     "regular_plural_subject_verb_agreement_2",
+     "sentential_negation_npi_licensor_present",
+     "sentential_negation_npi_scope",
+     "sentential_subject_island",
+     "superlative_quantifiers_1",
+     "superlative_quantifiers_2",
+     "tough_vs_raising_1",
+     "tough_vs_raising_2",
+     "transitive",
+     "wh_island",
+     "wh_questions_object_gap",
+     "wh_questions_subject_gap",
+     "wh_questions_subject_gap_long_distance",
+     "wh_vs_that_no_gap",
+     "wh_vs_that_no_gap_long_distance",
+     "wh_vs_that_with_gap",
+     "wh_vs_that_with_gap_long_distance",
+ ]
+
+
+ def main() -> None:
+     for task in all_subtasks:
+         file_name = f"{task}.yaml"
+         try:
+             with open(f"{file_name}", "w", encoding="utf-8") as f:
+                 f.write("# Generated by utils.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_template_yaml",
+                         "task": "blimp_" + task,
+                         "dataset_name": task,
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
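Run from `lm_eval/tasks/blimp/`, the script above writes one `<subtask>.yaml` per entry in `all_subtasks`. Two quirks worth noting: the `FileExistsError` handler is effectively a no-op, since mode `"w"` overwrites existing files, and the `# Generated by utils.py` header presumably reflects an earlier filename for this generator. An illustrative check that a generated config resolves back to the shared template (assumes the script has already been run so `adjunct_island.yaml` exists next to `_template_yaml`):

```python
# Load one generated config and confirm it only overrides dataset_name/task
# while pointing at the shared _template_yaml via "include".
import yaml

with open("adjunct_island.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg)
# Expected (keys alphabetical, yaml.dump's default ordering):
# {'dataset_name': 'adjunct_island', 'include': '_template_yaml', 'task': 'blimp_adjunct_island'}
```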
lm-evaluation/lm_eval/tasks/blimp/inchoative.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: inchoative
+ include: _template_yaml
+ task: blimp_inchoative
lm-evaluation/lm_eval/tasks/blimp/intransitive.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: intransitive
+ include: _template_yaml
+ task: blimp_intransitive
lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_past_participle_adjectives
+ include: _template_yaml
+ task: blimp_irregular_past_participle_adjectives
lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_plural_subject_verb_agreement_1
+ include: _template_yaml
+ task: blimp_irregular_plural_subject_verb_agreement_1
lm-evaluation/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: left_branch_island_echo_question
+ include: _template_yaml
+ task: blimp_left_branch_island_echo_question
lm-evaluation/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: left_branch_island_simple_question
+ include: _template_yaml
+ task: blimp_left_branch_island_simple_question
lm-evaluation/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: matrix_question_npi_licensor_present
+ include: _template_yaml
+ task: blimp_matrix_question_npi_licensor_present
lm-evaluation/lm_eval/tasks/blimp/npi_present_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_1
+ include: _template_yaml
+ task: blimp_npi_present_1
lm-evaluation/lm_eval/tasks/blimp/npi_present_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_2
+ include: _template_yaml
+ task: blimp_npi_present_2
lm-evaluation/lm_eval/tasks/blimp/only_npi_scope.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: only_npi_scope
+ include: _template_yaml
+ task: blimp_only_npi_scope
lm-evaluation/lm_eval/tasks/blimp/passive_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: passive_1
+ include: _template_yaml
+ task: blimp_passive_1
lm-evaluation/lm_eval/tasks/blimp/passive_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: passive_2
+ include: _template_yaml
+ task: blimp_passive_2
lm-evaluation/lm_eval/tasks/blimp/principle_A_c_command.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_c_command
+ include: _template_yaml
+ task: blimp_principle_A_c_command
lm-evaluation/lm_eval/tasks/blimp/principle_A_case_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_case_2
+ include: _template_yaml
+ task: blimp_principle_A_case_2
lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_domain_1
+ include: _template_yaml
+ task: blimp_principle_A_domain_1
lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_3.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_domain_3
+ include: _template_yaml
+ task: blimp_principle_A_domain_3
lm-evaluation/lm_eval/tasks/blimp/principle_A_reconstruction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_reconstruction
+ include: _template_yaml
+ task: blimp_principle_A_reconstruction
lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: regular_plural_subject_verb_agreement_2
+ include: _template_yaml
+ task: blimp_regular_plural_subject_verb_agreement_2
lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_scope.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: sentential_negation_npi_scope
+ include: _template_yaml
+ task: blimp_sentential_negation_npi_scope
lm-evaluation/lm_eval/tasks/blimp/sentential_subject_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: sentential_subject_island
+ include: _template_yaml
+ task: blimp_sentential_subject_island
lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: superlative_quantifiers_2
+ include: _template_yaml
+ task: blimp_superlative_quantifiers_2